Synchronize hinic driver of OLK-5.10 to OLK-6.6
Signed-off-by: Zhou Shuai <zhoushuai28@huawei.com>
---
drivers/net/ethernet/huawei/hinic/Kconfig | 6 +-
drivers/net/ethernet/huawei/hinic/Makefile | 14 +-
.../net/ethernet/huawei/hinic/hinic_api_cmd.c | 1164 ++++
.../net/ethernet/huawei/hinic/hinic_api_cmd.h | 298 +
drivers/net/ethernet/huawei/hinic/hinic_cfg.c | 2478 ++++++++
drivers/net/ethernet/huawei/hinic/hinic_cfg.h | 526 ++
.../net/ethernet/huawei/hinic/hinic_cmdq.c | 1575 ++++++
.../net/ethernet/huawei/hinic/hinic_cmdq.h | 217 +
.../net/ethernet/huawei/hinic/hinic_common.c | 71 -
.../net/ethernet/huawei/hinic/hinic_common.h | 29 -
drivers/net/ethernet/huawei/hinic/hinic_csr.h | 207 +
.../net/ethernet/huawei/hinic/hinic_ctx_def.h | 242 +
drivers/net/ethernet/huawei/hinic/hinic_dbg.h | 93 +
.../ethernet/huawei/hinic/hinic_dbgtool_knl.c | 899 +++
.../ethernet/huawei/hinic/hinic_dbgtool_knl.h | 120 +
drivers/net/ethernet/huawei/hinic/hinic_dcb.c | 1796 ++++++
drivers/net/ethernet/huawei/hinic/hinic_dcb.h | 67 +
.../net/ethernet/huawei/hinic/hinic_debugfs.c | 324 --
.../net/ethernet/huawei/hinic/hinic_debugfs.h | 113 -
drivers/net/ethernet/huawei/hinic/hinic_dev.h | 132 -
.../net/ethernet/huawei/hinic/hinic_devlink.c | 584 --
.../net/ethernet/huawei/hinic/hinic_devlink.h | 119 -
.../net/ethernet/huawei/hinic/hinic_dfx_def.h | 150 +
drivers/net/ethernet/huawei/hinic/hinic_eqs.c | 1462 +++++
drivers/net/ethernet/huawei/hinic/hinic_eqs.h | 177 +
.../net/ethernet/huawei/hinic/hinic_ethtool.c | 3078 ++++++----
drivers/net/ethernet/huawei/hinic/hinic_hw.h | 770 +++
.../ethernet/huawei/hinic/hinic_hw_api_cmd.c | 987 ----
.../ethernet/huawei/hinic/hinic_hw_api_cmd.h | 203 -
.../net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 963 ----
.../net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 185 -
.../net/ethernet/huawei/hinic/hinic_hw_csr.h | 139 -
.../net/ethernet/huawei/hinic/hinic_hw_dev.c | 1197 ----
.../net/ethernet/huawei/hinic/hinic_hw_dev.h | 660 ---
.../net/ethernet/huawei/hinic/hinic_hw_eqs.c | 996 ----
.../net/ethernet/huawei/hinic/hinic_hw_eqs.h | 262 -
.../net/ethernet/huawei/hinic/hinic_hw_if.c | 419 --
.../net/ethernet/huawei/hinic/hinic_hw_if.h | 295 -
.../net/ethernet/huawei/hinic/hinic_hw_io.c | 634 ---
.../net/ethernet/huawei/hinic/hinic_hw_io.h | 123 -
.../net/ethernet/huawei/hinic/hinic_hw_mbox.h | 172 -
.../net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 681 ---
.../net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 636 ++-
.../net/ethernet/huawei/hinic/hinic_hw_qp.c | 971 ----
.../net/ethernet/huawei/hinic/hinic_hw_qp.h | 232 -
.../ethernet/huawei/hinic/hinic_hw_qp_ctxt.h | 210 -
.../net/ethernet/huawei/hinic/hinic_hw_wq.c | 904 ---
.../net/ethernet/huawei/hinic/hinic_hw_wq.h | 111 -
.../net/ethernet/huawei/hinic/hinic_hw_wqe.h | 430 --
.../net/ethernet/huawei/hinic/hinic_hwdev.c | 5001 +++++++++++++++++
.../net/ethernet/huawei/hinic/hinic_hwdev.h | 370 ++
.../net/ethernet/huawei/hinic/hinic_hwif.c | 1005 ++++
.../net/ethernet/huawei/hinic/hinic_hwif.h | 123 +
drivers/net/ethernet/huawei/hinic/hinic_lld.c | 2923 ++++++++++
drivers/net/ethernet/huawei/hinic/hinic_lld.h | 128 +
.../net/ethernet/huawei/hinic/hinic_main.c | 3848 +++++++++----
.../hinic/{hinic_hw_mbox.c => hinic_mbox.c} | 1267 +++--
.../net/ethernet/huawei/hinic/hinic_mbox.h | 241 +
.../net/ethernet/huawei/hinic/hinic_mgmt.c | 1446 +++++
.../net/ethernet/huawei/hinic/hinic_mgmt.h | 245 +
.../huawei/hinic/hinic_mgmt_interface.h | 987 ++++
.../ethernet/huawei/hinic/hinic_msix_attr.c | 130 +
.../ethernet/huawei/hinic/hinic_msix_attr.h | 69 +
.../huawei/hinic/hinic_multi_host_mgmt.c | 972 ++++
.../huawei/hinic/hinic_multi_host_mgmt.h | 107 +
drivers/net/ethernet/huawei/hinic/hinic_nic.h | 115 +
.../net/ethernet/huawei/hinic/hinic_nic_cfg.c | 3995 +++++++++++++
.../net/ethernet/huawei/hinic/hinic_nic_cfg.h | 638 +++
.../net/ethernet/huawei/hinic/hinic_nic_dbg.c | 294 +
.../net/ethernet/huawei/hinic/hinic_nic_dev.h | 289 +
.../net/ethernet/huawei/hinic/hinic_nic_io.c | 1047 ++++
.../net/ethernet/huawei/hinic/hinic_nic_io.h | 99 +
.../net/ethernet/huawei/hinic/hinic_nictool.c | 2503 +++++++++
.../net/ethernet/huawei/hinic/hinic_nictool.h | 277 +
.../ethernet/huawei/hinic/hinic_pci_id_tbl.h | 40 +
.../net/ethernet/huawei/hinic/hinic_port.c | 1412 -----
.../net/ethernet/huawei/hinic/hinic_port.h | 900 ---
.../ethernet/huawei/hinic/hinic_port_cmd.h | 541 ++
.../net/ethernet/huawei/hinic/hinic_qe_def.h | 459 ++
drivers/net/ethernet/huawei/hinic/hinic_qp.c | 222 +
drivers/net/ethernet/huawei/hinic/hinic_qp.h | 143 +
drivers/net/ethernet/huawei/hinic/hinic_rx.c | 1342 +++--
drivers/net/ethernet/huawei/hinic/hinic_rx.h | 126 +-
.../net/ethernet/huawei/hinic/hinic_sm_lt.h | 228 +
.../ethernet/huawei/hinic/hinic_sml_counter.c | 310 +
.../ethernet/huawei/hinic/hinic_sml_counter.h | 119 +
.../net/ethernet/huawei/hinic/hinic_sml_lt.c | 286 +
.../net/ethernet/huawei/hinic/hinic_sriov.c | 1535 ++---
.../net/ethernet/huawei/hinic/hinic_sriov.h | 98 +-
drivers/net/ethernet/huawei/hinic/hinic_tx.c | 1426 +++--
drivers/net/ethernet/huawei/hinic/hinic_tx.h | 127 +-
drivers/net/ethernet/huawei/hinic/hinic_wq.c | 683 +++
drivers/net/ethernet/huawei/hinic/hinic_wq.h | 117 +
drivers/net/ethernet/huawei/hinic/ossl_knl.h | 63 +
94 files changed, 46546 insertions(+), 19871 deletions(-)
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_cfg.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_cfg.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_cmdq.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_cmdq.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_common.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_common.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_csr.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dbg.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dcb.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dcb.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dev.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_devlink.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_devlink.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_eqs.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_eqs.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hwdev.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hwif.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hwif.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_lld.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_lld.h
rename drivers/net/ethernet/huawei/hinic/{hinic_hw_mbox.c => hinic_mbox.c} (58%)
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_mbox.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_mgmt.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_mgmt.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_io.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nic_io.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nictool.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_nictool.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_qe_def.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_qp.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_qp.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_wq.c
create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_wq.h
create mode 100644 drivers/net/ethernet/huawei/hinic/ossl_knl.h
diff --git a/drivers/net/ethernet/huawei/hinic/Kconfig b/drivers/net/ethernet/huawei/hinic/Kconfig
index b47bd5440c5f..137f64a18c5f 100644
--- a/drivers/net/ethernet/huawei/hinic/Kconfig
+++ b/drivers/net/ethernet/huawei/hinic/Kconfig
@@ -5,10 +5,10 @@
config HINIC
tristate "Huawei Intelligent PCIE Network Interface Card"
- depends on (PCI_MSI && (X86 || ARM64))
- select NET_DEVLINK
+ default n
+ depends on PCI_MSI && NUMA && PCI_IOV && DCB && (X86 || ARM64)
help
This driver supports HiNIC PCIE Ethernet cards.
To compile this driver as part of the kernel, choose Y here.
If unsure, choose N.
- The default is compiled as module.
+ The default is N.
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
index 2f89119c9b69..7a1e20340f30 100644
--- a/drivers/net/ethernet/huawei/hinic/Makefile
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -1,8 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HINIC) += hinic.o
-hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
- hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
- hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
- hinic_common.o hinic_ethtool.o hinic_devlink.o hinic_hw_mbox.o \
- hinic_sriov.o hinic_debugfs.o
+hinic-y := hinic_nic_cfg.o hinic_nic_io.o hinic_nic_dbg.o \
+ hinic_hwif.o hinic_msix_attr.o hinic_eqs.o \
+ hinic_mbox.o hinic_api_cmd.o hinic_mgmt.o \
+ hinic_wq.o hinic_cmdq.o hinic_hwdev.o hinic_cfg.o \
+ hinic_sml_counter.o hinic_sml_lt.o \
+ hinic_multi_host_mgmt.o hinic_main.o hinic_lld.o \
+ hinic_qp.o hinic_rx.o hinic_tx.o hinic_dbgtool_knl.o \
+ hinic_nictool.o hinic_sriov.o hinic_dcb.o\
+ hinic_ethtool.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c
new file mode 100644
index 000000000000..2df14231c925
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c
@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/semaphore.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_csr.h"
+#include "hinic_hwif.h"
+#include "hinic_api_cmd.h"
+
+#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U
+
+#define API_CMD_CELL_DESC_SIZE 8
+#define API_CMD_CELL_DATA_ADDR_SIZE 8
+
+#define API_CHAIN_NUM_CELLS 32
+#define API_CHAIN_CELL_SIZE 128
+#define API_CHAIN_RSP_DATA_SIZE 128
+
+#define API_CMD_CELL_WB_ADDR_SIZE 8
+
+#define API_CHAIN_CELL_ALIGNMENT 8
+
+#define API_CMD_TIMEOUT 10000
+#define API_CMD_STATUS_TIMEOUT 100000
+
+#define API_CMD_BUF_SIZE 2048ULL
+
+#define API_CMD_NODE_ALIGN_SIZE 512ULL
+#define API_PAYLOAD_ALIGN_SIZE 64ULL
+
+#define API_CHAIN_RESP_ALIGNMENT 64ULL
+
+#define COMPLETION_TIMEOUT_DEFAULT 1000UL
+#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U
+
+#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val)))
+
+#define READ_API_CMD_PRIV_DATA(id, token) (((id) << 16) + (token))
+#define WRITE_API_CMD_PRIV_DATA(id) (((u8)id) << 16)
+
+#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1))
+
+#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2)
+#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3)
+
+enum api_cmd_data_format {
+ SGL_DATA = 1,
+};
+
+enum api_cmd_type {
+ API_CMD_WRITE_TYPE = 0,
+ API_CMD_READ_TYPE = 1,
+};
+
+enum api_cmd_bypass {
+ NOT_BYPASS = 0,
+ BYPASS = 1,
+};
+
+enum api_cmd_resp_aeq {
+ NOT_TRIGGER = 0,
+ TRIGGER = 1,
+};
+
+static u8 xor_chksum_set(void *data)
+{
+ int idx;
+ u8 checksum = 0;
+ u8 *val = data;
+
+ for (idx = 0; idx < 7; idx++)
+ checksum ^= val[idx];
+
+ return checksum;
+}
+
+static void set_prod_idx(struct hinic_api_cmd_chain *chain)
+{
+ enum hinic_api_cmd_chain_type chain_type = chain->chain_type;
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 hw_prod_idx_addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type);
+ u32 prod_idx = chain->prod_idx;
+
+ hinic_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx);
+}
+
+static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain)
+{
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwdev->hwif, addr);
+
+ return HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
+}
+
+static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwdev->hwif, addr);
+
+ sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n",
+ chain->chain_type, HINIC_API_CMD_STATUS_GET(val, CPLD_ERR),
+ HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR),
+ HINIC_API_CMD_STATUS_GET(val, FSM));
+
+ sdk_err(dev, "Chain hw current ci: 0x%x\n",
+ HINIC_API_CMD_STATUS_GET(val, CONS_IDX));
+
+ addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type);
+ val = hinic_hwif_read_reg(chain->hwdev->hwif, addr);
+ sdk_err(dev, "Chain hw current pi: 0x%x\n", val);
+}
+
+/**
+ * chain_busy - check if the chain is still processing last requests
+ * @chain: chain to check
+ * Return: 0 - success, negative - failure
+ */
+static int chain_busy(struct hinic_api_cmd_chain *chain)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ struct hinic_api_cmd_cell_ctxt *ctxt;
+ u64 resp_header;
+
+ ctxt = &chain->cell_ctxt[chain->prod_idx];
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_MULTI_READ:
+ case HINIC_API_CMD_POLL_READ:
+ resp_header = be64_to_cpu(ctxt->resp->header);
+ if (ctxt->status &&
+ !HINIC_API_CMD_RESP_HEADER_VALID(resp_header)) {
+ sdk_err(dev, "Context(0x%x) busy, pi: %d, resp_header: 0x%08x%08x\n",
+ ctxt->status, chain->prod_idx,
+ upper_32_bits(resp_header),
+ lower_32_bits(resp_header));
+ dump_api_chain_reg(chain);
+ return -EBUSY;
+ }
+ break;
+ case HINIC_API_CMD_POLL_WRITE:
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ chain->cons_idx = get_hw_cons_idx(chain);
+
+ if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) {
+ sdk_err(dev, "API CMD chain %d is busy, cons_idx = %d, prod_idx = %d\n",
+ chain->chain_type, chain->cons_idx,
+ chain->prod_idx);
+ dump_api_chain_reg(chain);
+ return -EBUSY;
+ }
+ break;
+ default:
+ sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * get_cell_data_size - get the data size of specific cell type
+ * @type: chain type
+ * @cmd_size: the command size
+ * Return: cell_data_size
+ */
+static u16 get_cell_data_size(enum hinic_api_cmd_chain_type type, u16 cmd_size)
+{
+ u16 cell_data_size = 0;
+
+ switch (type) {
+ case HINIC_API_CMD_POLL_READ:
+ cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
+ API_CMD_CELL_WB_ADDR_SIZE +
+ API_CMD_CELL_DATA_ADDR_SIZE,
+ API_CHAIN_CELL_ALIGNMENT);
+ break;
+
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ case HINIC_API_CMD_POLL_WRITE:
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
+ API_CMD_CELL_DATA_ADDR_SIZE,
+ API_CHAIN_CELL_ALIGNMENT);
+ break;
+ default:
+ break;
+ }
+
+ return cell_data_size;
+}
+
+/**
+ * prepare_cell_ctrl - prepare the ctrl of the cell for the command
+ * @cell_ctrl: the control of the cell to set the control into it
+ * @cell_len: the size of the cell
+ */
+static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len)
+{
+ u64 ctrl;
+ u8 chksum;
+
+ ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) |
+ HINIC_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) |
+ HINIC_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF);
+
+ chksum = xor_chksum_set(&ctrl);
+
+ ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM);
+
+ /* The data in the HW should be in Big Endian Format */
+ *cell_ctrl = cpu_to_be64(ctrl);
+}
+
+/**
+ * prepare_api_cmd - prepare API CMD command
+ * @chain: chain for the command
+ * @cell: the cell of the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ */
+static void prepare_api_cmd(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell *cell,
+ enum hinic_node_id dest,
+ const void *cmd, u16 cmd_size)
+{
+ struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+ u32 priv;
+
+ cell_ctxt = &chain->cell_ctxt[chain->prod_idx];
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_POLL_READ:
+ priv = READ_API_CMD_PRIV_DATA(chain->chain_type,
+ cell_ctxt->saved_prod_idx);
+ cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+ HINIC_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) |
+ HINIC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) |
+ HINIC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) |
+ HINIC_API_CMD_DESC_SET(priv, PRIV_DATA);
+ break;
+ case HINIC_API_CMD_POLL_WRITE:
+ priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type);
+ cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+ HINIC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) |
+ HINIC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) |
+ HINIC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) |
+ HINIC_API_CMD_DESC_SET(priv, PRIV_DATA);
+ break;
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type);
+ cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
+ HINIC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) |
+ HINIC_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) |
+ HINIC_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) |
+ HINIC_API_CMD_DESC_SET(priv, PRIV_DATA);
+ break;
+ default:
+ sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n",
+ chain->chain_type);
+ return;
+ }
+
+ cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) |
+ HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);
+
+ cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc),
+ XOR_CHKSUM);
+
+ /* The data in the HW should be in Big Endian Format */
+ cell->desc = cpu_to_be64(cell->desc);
+
+ memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
+}
+
+/**
+ * prepare_cell - prepare cell ctrl and cmd in the current producer cell
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ */
+static void prepare_cell(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest,
+ void *cmd, u16 cmd_size)
+{
+ struct hinic_api_cmd_cell *curr_node;
+ u16 cell_size;
+
+ curr_node = chain->curr_node;
+
+ cell_size = get_cell_data_size(chain->chain_type, cmd_size);
+
+ prepare_cell_ctrl(&curr_node->ctrl, cell_size);
+ prepare_api_cmd(chain, curr_node, dest, cmd, cmd_size);
+}
+
+static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain)
+{
+ chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
+}
+
+static void issue_api_cmd(struct hinic_api_cmd_chain *chain)
+{
+ set_prod_idx(chain);
+}
+
+/**
+ * api_cmd_status_update - update the status of the chain
+ * @chain: chain to update
+ */
+static void api_cmd_status_update(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_api_cmd_status *wb_status;
+ enum hinic_api_cmd_chain_type chain_type;
+ u64 status_header;
+ u32 buf_desc;
+
+ wb_status = chain->wb_status;
+
+ buf_desc = be32_to_cpu(wb_status->buf_desc);
+ if (HINIC_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR))
+ return;
+
+ status_header = be64_to_cpu(wb_status->header);
+ chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
+ if (chain_type >= HINIC_API_CMD_MAX)
+ return;
+
+ if (chain_type != chain->chain_type)
+ return;
+
+ chain->cons_idx = HINIC_API_CMD_STATUS_GET(buf_desc, CONS_IDX);
+}
+
+/**
+ * wait_for_status_poll - wait for write to mgmt command to complete
+ * @chain: the chain of the command
+ * Return: 0 - success, negative - failure
+ */
+static int wait_for_status_poll(struct hinic_api_cmd_chain *chain)
+{
+ int err = -ETIMEDOUT;
+ u32 cnt = 0;
+
+ while (cnt < API_CMD_STATUS_TIMEOUT &&
+ chain->hwdev->chip_present_flag) {
+ api_cmd_status_update(chain);
+
+ /* SYNC API CMD cmd should start after prev cmd finished */
+ if (chain->cons_idx == chain->prod_idx) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(50, 100);
+ cnt++;
+ }
+
+ return err;
+}
+
+static void copy_resp_data(struct hinic_api_cmd_cell_ctxt *ctxt, void *ack,
+ u16 ack_size)
+{
+ struct hinic_api_cmd_resp_fmt *resp = ctxt->resp;
+
+ memcpy(ack, &resp->resp_data, ack_size);
+ ctxt->status = 0;
+}
+
+/**
+ * wait_for_resp_polling - polling for response data of the read api-command
+ * @ctxt: pointer to api cmd cell ctxt
+ *
+ * Return: 0 - success, negative - failure
+ */
+static int wait_for_resp_polling(struct hinic_api_cmd_cell_ctxt *ctxt)
+{
+ u64 resp_header;
+ int ret = -ETIMEDOUT;
+ u32 cnt = 0;
+
+ while (cnt < POLLING_COMPLETION_TIMEOUT_DEFAULT) {
+ resp_header = be64_to_cpu(ctxt->resp->header);
+
+ rmb(); /* read the latest header */
+
+ if (HINIC_API_CMD_RESP_HEADER_VALID(resp_header)) {
+ ret = 0;
+ break;
+ }
+ usleep_range(100, 1000);
+ cnt++;
+ }
+
+ if (ret)
+ pr_err("Wait for api chain response timeout\n");
+
+ return ret;
+}
+
+/**
+ * wait_for_api_cmd_completion - wait for command to complete
+ * @chain: chain for the command
+ * @ctxt: pointer to api cmd cell ctxt
+ * @ack: pointer to ack message
+ * @ack_size: the size of ack message
+ * Return: 0 - success, negative - failure
+ */
+static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell_ctxt *ctxt,
+ void *ack, u16 ack_size)
+{
+ void *dev = chain->hwdev->dev_hdl;
+ int err = 0;
+
+ switch (chain->chain_type) {
+ case HINIC_API_CMD_POLL_READ:
+ err = wait_for_resp_polling(ctxt);
+ if (!err)
+ copy_resp_data(ctxt, ack, ack_size);
+ break;
+ case HINIC_API_CMD_POLL_WRITE:
+ case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+ err = wait_for_status_poll(chain);
+ if (err) {
+ sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n",
+ chain->chain_type);
+ break;
+ }
+ break;
+ case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+ /* No need to wait */
+ break;
+ default:
+ sdk_err(dev, "Unknown API CMD Chain type: %d\n",
+ chain->chain_type);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ dump_api_chain_reg(chain);
+
+ return err;
+}
+
+static inline void update_api_cmd_ctxt(struct hinic_api_cmd_chain *chain,
+ struct hinic_api_cmd_cell_ctxt *ctxt)
+{
+ ctxt->status = 1;
+ ctxt->saved_prod_idx = chain->prod_idx;
+ if (ctxt->resp) {
+ ctxt->resp->header = 0;
+
+ /* make sure "header" was cleared */
+ wmb();
+ }
+}
+
+/**
+ * api_cmd - API CMD command
+ * @chain: chain for the command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @cmd_size: the command size
+ * @ack: the buffer for ack
+ * @ack_size: the size of ack
+ * Return: 0 - success, negative - failure
+ */
+static int api_cmd(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest,
+ void *cmd, u16 cmd_size, void *ack, u16 ack_size)
+{
+ struct hinic_api_cmd_cell_ctxt *ctxt;
+
+ if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_lock(&chain->async_lock);
+ else
+ down(&chain->sem);
+ ctxt = &chain->cell_ctxt[chain->prod_idx];
+ if (chain_busy(chain)) {
+ if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_unlock(&chain->async_lock);
+ else
+ up(&chain->sem);
+ return -EBUSY;
+ }
+ update_api_cmd_ctxt(chain, ctxt);
+
+ prepare_cell(chain, dest, cmd, cmd_size);
+
+ cmd_chain_prod_idx_inc(chain);
+
+ wmb(); /* issue the command */
+
+ issue_api_cmd(chain);
+
+ /* incremented prod idx, update ctxt */
+
+ chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr;
+ if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+ spin_unlock(&chain->async_lock);
+ else
+ up(&chain->sem);
+
+ return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size);
+}
+
+/**
+ * hinic_api_cmd_write - Write API CMD command
+ * @chain: chain for write command
+ * @dest: destination node on the card that will receive the command
+ * @cmd: command data
+ * @size: the command size
+ * Return: 0 - success, negative - failure
+ */
+int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest, void *cmd, u16 size)
+{
+ /* Verify the chain type */
+ return api_cmd(chain, dest, cmd, size, NULL, 0);
+}
+
+int hinic_api_cmd_read(struct hinic_api_cmd_chain *chain,
+ enum hinic_node_id dest,
+ void *cmd, u16 size, void *ack, u16 ack_size)
+{
+ return api_cmd(chain, dest, cmd, size, ack, ack_size);
+}
+
+/**
+ * api_cmd_hw_restart - restart the chain in the HW
+ * @cmd_chain: the API CMD specific chain to restart
+ */
+static int api_cmd_hw_restart(struct hinic_api_cmd_chain *cmd_chain)
+{
+ struct hinic_hwif *hwif = cmd_chain->hwdev->hwif;
+ u32 reg_addr, val;
+ int err;
+ u32 cnt = 0;
+
+ /* Read Modify Write */
+ reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type);
+ val = hinic_hwif_read_reg(hwif, reg_addr);
+
+ val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART);
+ val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART);
+
+ hinic_hwif_write_reg(hwif, reg_addr, val);
+
+ err = -ETIMEDOUT;
+ while (cnt < API_CMD_TIMEOUT) {
+ val = hinic_hwif_read_reg(hwif, reg_addr);
+
+ if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ return err;
+}
+
+/**
+ * api_cmd_ctrl_init - set the control register of a chain
+ * @chain: the API CMD specific chain to set control register for
+ */
+static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 reg_addr, ctrl;
+ u32 size;
+
+ /* Read Modify Write */
+ reg_addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
+
+ size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT);
+
+ ctrl = hinic_hwif_read_reg(hwif, reg_addr);
+
+ ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
+ HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
+
+ ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) |
+ HINIC_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE);
+
+ hinic_hwif_write_reg(hwif, reg_addr, ctrl);
+}
+
+/**
+ * api_cmd_set_status_addr - set the status address of a chain in the HW
+ * @chain: the API CMD specific chain to set status address for
+ */
+static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type);
+ val = upper_32_bits(chain->wb_status_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type);
+ val = lower_32_bits(chain->wb_status_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_set_num_cells - set the number of cells of a chain in the HW
+ * @chain: the API CMD specific chain to set the number of cells for
+ */
+static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type);
+ val = chain->num_cells;
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * api_cmd_head_init - set the head cell of a chain in the HW
+ * @chain: the API CMD specific chain to set the head for
+ */
+static void api_cmd_head_init(struct hinic_api_cmd_chain *chain)
+{
+ struct hinic_hwif *hwif = chain->hwdev->hwif;
+ u32 addr, val;
+
+ addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type);
+ val = upper_32_bits(chain->head_cell_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+
+ addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type);
+ val = lower_32_bits(chain->head_cell_paddr);
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/**
+ * wait_for_ready_chain - poll until the HW consumer index matches ours
+ * @chain: the API CMD specific chain to wait for
+ * Return: 0 - success, -ETIMEDOUT - HW did not catch up in time
+ */
+static int wait_for_ready_chain(struct hinic_api_cmd_chain *chain)
+{
+	struct hinic_hwif *hwif = chain->hwdev->hwif;
+	u32 addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type);
+	u32 status, cnt;
+
+	for (cnt = 0; cnt < API_CMD_TIMEOUT; cnt++) {
+		status = hinic_hwif_read_reg(hwif, addr);
+
+		/* the chain is ready once HW cons idx reaches SW cons idx */
+		if (HINIC_API_CMD_STATUS_GET(status, CONS_IDX) ==
+		    chain->cons_idx)
+			return 0;
+
+		usleep_range(900, 1000);
+	}
+
+	return -ETIMEDOUT;
+}
+
+/**
+ * api_cmd_chain_hw_clean - clear the chain control fields in the HW
+ * @chain: the API CMD specific chain
+ */
+static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain)
+{
+	struct hinic_hwif *hwif = chain->hwdev->hwif;
+	u32 addr, ctrl;
+
+	addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
+
+	/* read-modify-write: clear each control field in turn */
+	ctrl = hinic_hwif_read_reg(hwif, addr);
+	ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN);
+	ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR);
+	ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN);
+	ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN);
+	ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
+
+	hinic_hwif_write_reg(hwif, addr, ctrl);
+}
+
+/**
+ * api_cmd_chain_hw_init - initialize the chain in the HW
+ * @chain: the API CMD specific chain to initialize in HW
+ * Return: 0 - success, negative - failure
+ *
+ * Ordering matters: the control fields are cleared and the write-back
+ * status address is programmed before the HW restart is requested, and
+ * the head/cell-count registers are only set once the restart succeeded.
+ */
+static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain)
+{
+	api_cmd_chain_hw_clean(chain);
+
+	api_cmd_set_status_addr(chain);
+
+	if (api_cmd_hw_restart(chain)) {
+		sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n");
+		return -EBUSY;
+	}
+
+	api_cmd_ctrl_init(chain);
+	api_cmd_set_num_cells(chain);
+	api_cmd_head_init(chain);
+
+	/* block until the HW consumer index confirms the chain is usable */
+	return wait_for_ready_chain(chain);
+}
+
+/**
+ * alloc_cmd_buf - attach a command buffer slice to a cell
+ * @chain: the API CMD specific chain for the cmd
+ * @cell: the cell in the HW for the cmd
+ * @cell_idx: the index of the cell
+ * Return: 0 - success, negative - failure
+ *
+ * Nothing is allocated here: each cell gets one buf_size_align-sized
+ * slice of the buffer area reserved at chain creation time.
+ */
+static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
+			 struct hinic_api_cmd_cell *cell, u32 cell_idx)
+{
+	struct hinic_api_cmd_cell_ctxt *cell_ctxt = &chain->cell_ctxt[cell_idx];
+	u64 offset = chain->buf_size_align * cell_idx;
+	u64 buf_paddr = chain->buf_paddr_base + offset;
+
+	cell_ctxt->api_cmd_vaddr = (u8 *)((u64)chain->buf_vaddr_base + offset);
+
+	/* the cmd DMA address stored in the cell must be big endian */
+	switch (chain->chain_type) {
+	case HINIC_API_CMD_POLL_READ:
+		cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr);
+		return 0;
+	case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+	case HINIC_API_CMD_POLL_WRITE:
+	case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+		cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr);
+		return 0;
+	default:
+		sdk_err(chain->hwdev->dev_hdl, "Unknown API CMD Chain type: %d\n",
+			chain->chain_type);
+		return -EINVAL;
+	}
+}
+
+/* attach a response write-back slice to a read-chain cell */
+static void alloc_resp_buf(struct hinic_api_cmd_chain *chain,
+			   struct hinic_api_cmd_cell *cell, u32 cell_idx)
+{
+	u64 offset = chain->rsp_size_align * cell_idx;
+
+	/* remember the CPU view of this cell's response slice */
+	chain->cell_ctxt[cell_idx].resp =
+		(void *)((u64)chain->rsp_vaddr_base + offset);
+
+	/* the HW takes the write-back DMA address in big endian */
+	cell->read.hw_wb_resp_paddr =
+		cpu_to_be64(chain->rsp_paddr_base + offset);
+}
+
+/**
+ * hinic_alloc_api_cmd_cell_buf - attach data/response buffers to a cell
+ * @chain: the API CMD specific chain that owns the cell
+ * @cell_idx: the index of the cell
+ * @node: the cell to attach the buffers to
+ * Return: 0 - success, negative - failure
+ */
+static int hinic_alloc_api_cmd_cell_buf(struct hinic_api_cmd_chain *chain,
+					u32 cell_idx,
+					struct hinic_api_cmd_cell *node)
+{
+	void *dev = chain->hwdev->dev_hdl;
+	int err;
+
+	/* For read chain, we should allocate buffer for the response data */
+	if (chain->chain_type == HINIC_API_CMD_MULTI_READ ||
+	    chain->chain_type == HINIC_API_CMD_POLL_READ)
+		alloc_resp_buf(chain, node, cell_idx);
+
+	switch (chain->chain_type) {
+	case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
+	case HINIC_API_CMD_POLL_WRITE:
+	case HINIC_API_CMD_POLL_READ:
+	case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
+		err = alloc_cmd_buf(chain, node, cell_idx);
+		if (err) {
+			sdk_err(dev, "Failed to allocate cmd buffer\n");
+			/* no resources acquired yet, return directly */
+			return err;
+		}
+		break;
+	/* For api command write and api command read, the data section
+	 * is directly inserted in the cell, so no need to allocate.
+	 */
+	case HINIC_API_CMD_MULTI_READ:
+		chain->cell_ctxt[cell_idx].api_cmd_vaddr =
+			&node->read.hw_cmd_paddr;
+		break;
+	default:
+		sdk_err(dev, "Unsupported API CMD chain type\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * api_cmd_create_cell - create API CMD cell of specific chain
+ * @chain: the API CMD specific chain to create its cell
+ * @cell_idx: the cell index to create
+ * @pre_node: previous cell (NULL for the first cell)
+ * @node_vaddr: the virt addr of the cell
+ * Return: 0 - success, negative - failure
+ */
+static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, u32 cell_idx,
+			       struct hinic_api_cmd_cell *pre_node,
+			       struct hinic_api_cmd_cell **node_vaddr)
+{
+	struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+	struct hinic_api_cmd_cell *node;
+	void *cell_vaddr;
+	u64 cell_paddr;
+	int err;
+
+	/* locate this cell's slice inside the preallocated cells area */
+	cell_vaddr = (void *)((u64)chain->cell_vaddr_base +
+			      chain->cell_size_align * cell_idx);
+	cell_paddr = chain->cell_paddr_base +
+		     chain->cell_size_align * cell_idx;
+
+	cell_ctxt = &chain->cell_ctxt[cell_idx];
+	cell_ctxt->cell_vaddr = cell_vaddr;
+	node = cell_ctxt->cell_vaddr;
+
+	if (!pre_node) {
+		/* the first cell becomes the head of the chain */
+		chain->head_node = cell_vaddr;
+		chain->head_cell_paddr = cell_paddr;
+	} else {
+		/* The data in the HW should be in Big Endian Format */
+		pre_node->next_cell_paddr = cpu_to_be64(cell_paddr);
+	}
+
+	/* Driver software should make sure that there is an empty API
+	 * command cell at the end the chain
+	 */
+	node->next_cell_paddr = 0;
+
+	err = hinic_alloc_api_cmd_cell_buf(chain, cell_idx, node);
+	if (err)
+		return err;
+
+	*node_vaddr = node;
+
+	return 0;
+}
+
+/**
+ * api_cmd_create_cells - build every cell of a chain and close the ring
+ * @chain: the API CMD specific chain
+ * Return: 0 - success, negative - failure
+ */
+static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain)
+{
+	struct hinic_api_cmd_cell *prev = NULL, *curr = NULL;
+	u32 idx;
+	int err;
+
+	for (idx = 0; idx < chain->num_cells; idx++) {
+		err = api_cmd_create_cell(chain, idx, prev, &curr);
+		if (err) {
+			sdk_err(chain->hwdev->dev_hdl,
+				"Failed to create API CMD cell\n");
+			return err;
+		}
+
+		prev = curr;
+	}
+
+	if (!curr)
+		return -EFAULT;
+
+	/* close the ring: the final cell points back at the head */
+	curr->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);
+
+	/* start producing from the head of the ring */
+	chain->curr_node = chain->head_node;
+	return 0;
+}
+
+/**
+ * api_chain_init - initialize API CMD specific chain
+ * @chain: the API CMD specific chain to initialize
+ * @attr: attributes to set in the chain
+ * Return: 0 - success, negative - failure
+ */
+static int api_chain_init(struct hinic_api_cmd_chain *chain,
+			  struct hinic_api_cmd_chain_attr *attr)
+{
+	void *dev = chain->hwdev->dev_hdl;
+	size_t cell_ctxt_size;
+	size_t cells_buf_size;
+	int err;
+
+	chain->chain_type = attr->chain_type;
+	chain->num_cells = attr->num_cells;
+	chain->cell_size = attr->cell_size;
+	chain->rsp_size = attr->rsp_size;
+
+	chain->prod_idx = 0;
+	chain->cons_idx = 0;
+
+	/* the async chain is guarded by a spinlock; every other chain
+	 * type serializes senders with a semaphore
+	 */
+	if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
+		spin_lock_init(&chain->async_lock);
+	else
+		sema_init(&chain->sem, 1);
+
+	cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
+	if (!cell_ctxt_size) {
+		sdk_err(dev, "Api chain cell size cannot be zero\n");
+		return -EINVAL;
+	}
+
+	chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL);
+	if (!chain->cell_ctxt)
+		return -ENOMEM;
+
+	/* status block the HW writes back to via DMA */
+	chain->wb_status = dma_alloc_coherent(dev, sizeof(*chain->wb_status),
+					      &chain->wb_status_paddr,
+					      GFP_KERNEL);
+	if (!chain->wb_status) {
+		sdk_err(dev, "Failed to allocate DMA wb status\n");
+		err = -ENOMEM;
+		goto alloc_wb_status_err;
+	}
+
+	chain->cell_size_align = ALIGN((u64)chain->cell_size,
+				       API_CMD_NODE_ALIGN_SIZE);
+	chain->rsp_size_align = ALIGN((u64)chain->rsp_size,
+				      API_CHAIN_RESP_ALIGNMENT);
+	chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE);
+
+	/* one contiguous DMA area holds, in order: all cells, then all
+	 * response slices, then all command buffers
+	 */
+	cells_buf_size = (chain->cell_size_align + chain->rsp_size_align +
+			  chain->buf_size_align) * chain->num_cells;
+
+	err = hinic_dma_alloc_coherent_align(dev, cells_buf_size,
+					     API_CMD_NODE_ALIGN_SIZE,
+					     GFP_KERNEL,
+					     &chain->cells_addr);
+	if (err) {
+		sdk_err(dev, "Failed to allocate API CMD cells buffer\n");
+		goto alloc_cells_buf_err;
+	}
+
+	chain->cell_vaddr_base = chain->cells_addr.align_vaddr;
+	chain->cell_paddr_base = chain->cells_addr.align_paddr;
+
+	/* response slices start right after the cells */
+	chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base +
+				       chain->cell_size_align * chain->num_cells);
+	chain->rsp_paddr_base = chain->cell_paddr_base +
+				chain->cell_size_align * chain->num_cells;
+
+	/* command buffers start right after the response slices */
+	chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base +
+				       chain->rsp_size_align * chain->num_cells);
+	chain->buf_paddr_base = chain->rsp_paddr_base +
+				chain->rsp_size_align * chain->num_cells;
+
+	return 0;
+
+alloc_cells_buf_err:
+	dma_free_coherent(dev, sizeof(*chain->wb_status),
+			  chain->wb_status, chain->wb_status_paddr);
+
+alloc_wb_status_err:
+	kfree(chain->cell_ctxt);
+
+	return err;
+}
+
+/**
+ * api_chain_free - release all memory owned by a chain
+ * @chain: the API CMD specific chain to free
+ */
+static void api_chain_free(struct hinic_api_cmd_chain *chain)
+{
+	void *dev = chain->hwdev->dev_hdl;
+
+	/* release in reverse order of api_chain_init() */
+	hinic_dma_free_coherent_align(dev, &chain->cells_addr);
+	dma_free_coherent(dev, sizeof(*chain->wb_status),
+			  chain->wb_status, chain->wb_status_paddr);
+	kfree(chain->cell_ctxt);
+}
+
+/**
+ * api_cmd_create_chain - create API CMD specific chain
+ * @cmd_chain: on success, receives the newly created chain
+ * @attr: attributes to set in the chain
+ * Return: 0 - success, negative - failure
+ */
+static int api_cmd_create_chain(struct hinic_api_cmd_chain **cmd_chain,
+				struct hinic_api_cmd_chain_attr *attr)
+{
+	struct hinic_hwdev *hwdev = attr->hwdev;
+	struct hinic_api_cmd_chain *chain;
+	int err;
+
+	/* num_cells must be a power of 2 */
+	if (attr->num_cells & (attr->num_cells - 1)) {
+		sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n");
+		return -EINVAL;
+	}
+
+	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+	if (!chain)
+		return -ENOMEM;
+
+	chain->hwdev = hwdev;
+
+	err = api_chain_init(chain, attr);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n");
+		goto chain_init_err;
+	}
+
+	err = api_cmd_create_cells(chain);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n");
+		goto create_cells_err;
+	}
+
+	err = api_cmd_chain_hw_init(chain);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n");
+		goto chain_hw_init_err;
+	}
+
+	*cmd_chain = chain;
+	return 0;
+
+chain_hw_init_err:
+create_cells_err:
+	api_chain_free(chain);
+
+chain_init_err:
+	kfree(chain);
+	return err;
+}
+
+/**
+ * api_cmd_destroy_chain - destroy API CMD specific chain
+ * @chain: the API CMD specific chain to destroy
+ *
+ * Frees the chain's buffers first, then the chain descriptor itself.
+ */
+static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain)
+{
+	api_chain_free(chain);
+	kfree(chain);
+}
+
+/**
+ * hinic_api_cmd_init - create one chain per API CMD chain type
+ * @hwdev: the pointer to hw device
+ * @chain: array that receives the created chains, indexed by chain type
+ * Return: 0 - success, negative - failure
+ */
+int hinic_api_cmd_init(struct hinic_hwdev *hwdev,
+		       struct hinic_api_cmd_chain **chain)
+{
+	struct hinic_api_cmd_chain_attr attr;
+	enum hinic_api_cmd_chain_type type, clean;
+	int err;
+
+	/* every chain type shares the same sizing attributes */
+	attr.hwdev = hwdev;
+	attr.num_cells = API_CHAIN_NUM_CELLS;
+	attr.cell_size = API_CHAIN_CELL_SIZE;
+	attr.rsp_size = API_CHAIN_RSP_DATA_SIZE;
+
+	for (type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
+	     type < HINIC_API_CMD_MAX; type++) {
+		attr.chain_type = type;
+
+		err = api_cmd_create_chain(&chain[type], &attr);
+		if (err) {
+			sdk_err(hwdev->dev_hdl, "Failed to create chain %d\n",
+				type);
+			goto create_chain_err;
+		}
+	}
+
+	return 0;
+
+create_chain_err:
+	/* unwind only the chains that were successfully created */
+	for (clean = HINIC_API_CMD_WRITE_TO_MGMT_CPU; clean < type; clean++)
+		api_cmd_destroy_chain(chain[clean]);
+
+	return err;
+}
+
+/**
+ * hinic_api_cmd_free - destroy every chain created by hinic_api_cmd_init()
+ * @chain: array of the API CMD chains to free
+ */
+void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain)
+{
+	enum hinic_api_cmd_chain_type type;
+
+	for (type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
+	     type < HINIC_API_CMD_MAX; type++)
+		api_cmd_destroy_chain(chain[type]);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h
new file mode 100644
index 000000000000..44914581ce80
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_API_CMD_H_
+#define HINIC_API_CMD_H_
+
+#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0
+#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16
+#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24
+#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
+
+#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU
+#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU
+#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU
+#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU
+
+#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \
+ ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \
+ HINIC_API_CMD_CELL_CTRL_##member##_SHIFT)
+
+#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0
+#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1
+#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
+#define HINIC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3
+#define HINIC_API_CMD_DESC_PRIV_DATA_SHIFT 8
+#define HINIC_API_CMD_DESC_DEST_SHIFT 32
+#define HINIC_API_CMD_DESC_SIZE_SHIFT 40
+#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
+
+#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1U
+#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1U
+#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U
+#define HINIC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U
+#define HINIC_API_CMD_DESC_DEST_MASK 0x1FU
+#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FFU
+#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU
+#define HINIC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU
+
+#define HINIC_API_CMD_DESC_SET(val, member) \
+ ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \
+ HINIC_API_CMD_DESC_##member##_SHIFT)
+
+#define HINIC_API_CMD_STATUS_HEADER_VALID_SHIFT 0
+#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
+
+#define HINIC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU
+#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU
+
+#define HINIC_API_CMD_STATUS_VALID_CODE 0xFF
+
+#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \
+ (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
+ HINIC_API_CMD_STATUS_HEADER_##member##_MASK)
+
+#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
+#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2
+
+#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U
+
+#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \
+ (((val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \
+ HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)
+
+#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \
+ (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
+ HINIC_API_CMD_CHAIN_REQ_##member##_MASK)
+
+#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
+ ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \
+ << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
+
+#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
+#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
+#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
+#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
+
+#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U
+#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U
+#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U
+#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U
+
+#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \
+ (((val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
+ HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
+
+#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
+ ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \
+ << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
+
+#define HINIC_API_CMD_RESP_HEAD_VALID_MASK 0xFF
+#define HINIC_API_CMD_RESP_HEAD_VALID_CODE 0xFF
+
+#define HINIC_API_CMD_RESP_HEADER_VALID(val) \
+ (((val) & HINIC_API_CMD_RESP_HEAD_VALID_MASK) == \
+ HINIC_API_CMD_RESP_HEAD_VALID_CODE)
+
+#define HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT 8
+#define HINIC_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU
+
+#define HINIC_API_CMD_RESP_HEAD_ERR_CODE 0x1
+#define HINIC_API_CMD_RESP_HEAD_ERR(val) \
+ ((((val) >> HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT) & \
+ HINIC_API_CMD_RESP_HEAD_STATUS_MASK) == \
+ HINIC_API_CMD_RESP_HEAD_ERR_CODE)
+
+#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16
+#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFF
+
+#define HINIC_API_CMD_RESP_RESERVED 3
+#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID(val) \
+ (((val) >> HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \
+ HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK)
+
+#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40
+#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU
+
+#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \
+ ((u16)(((val) >> HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \
+ HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK))
+
+#define HINIC_API_CMD_STATUS_HEAD_VALID_MASK 0xFFU
+#define HINIC_API_CMD_STATUS_HEAD_VALID_SHIFT 0
+
+#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_MASK 0xFFU
+#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT 16
+
+#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU
+#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0
+
+#define HINIC_API_CMD_STATUS_FSM_MASK 0xFU
+#define HINIC_API_CMD_STATUS_FSM_SHIFT 24
+
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
+
+#define HINIC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U
+#define HINIC_API_CMD_STATUS_CPLD_ERR_SHIFT 30
+
+#define HINIC_API_CMD_STATUS_CHAIN_ID(val) \
+ (((val) >> HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT) & \
+ HINIC_API_CMD_STATUS_HEAD_VALID_MASK)
+
+#define HINIC_API_CMD_STATUS_CONS_IDX(val) \
+ ((val) & HINIC_API_CMD_STATUS_CONS_IDX_MASK)
+
+#define HINIC_API_CMD_STATUS_CHKSUM_ERR(val) \
+ (((val) >> HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \
+ HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK)
+
+#define HINIC_API_CMD_STATUS_GET(val, member) \
+ (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \
+ HINIC_API_CMD_STATUS_##member##_MASK)
+
+enum hinic_api_cmd_chain_type {
+	/* write command with completion notification */
+	HINIC_API_CMD_WRITE = 0,
+	/* read command with completion notification */
+	HINIC_API_CMD_READ = 1,
+	/* write to mgmt cpu command with completion */
+	HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2,
+	/* multi read command with completion notification - not used */
+	HINIC_API_CMD_MULTI_READ = 3,
+	/* write command without completion notification */
+	HINIC_API_CMD_POLL_WRITE = 4,
+	/* read command without completion notification */
+	HINIC_API_CMD_POLL_READ = 5,
+	/* asynchronous write to mgmt cpu command, no completion wait */
+	HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6,
+	HINIC_API_CMD_MAX,
+};
+
+/* status block written back by the HW via DMA */
+struct hinic_api_cmd_status {
+	u64 header;		/* valid flag and chain id fields */
+	u32 buf_desc;
+	u32 cell_addr_hi;	/* high 32 bits of a cell address */
+	u32 cell_addr_lo;	/* low 32 bits of a cell address */
+	u32 rsvd0;
+	u64 rsvd1;
+};
+
+/* HW struct: one descriptor cell of the chain ring; address fields the
+ * HW reads are stored in big endian
+ */
+struct hinic_api_cmd_cell {
+	u64 ctrl;
+
+	/* address is 64 bit in HW struct */
+	u64 next_cell_paddr;
+
+	u64 desc;
+
+	/* HW struct */
+	union {
+		struct {
+			/* DMA address of the command buffer */
+			u64 hw_cmd_paddr;
+		} write;
+
+		struct {
+			/* DMA address the HW writes the response to */
+			u64 hw_wb_resp_paddr;
+			/* DMA address of the command buffer */
+			u64 hw_cmd_paddr;
+		} read;
+	};
+};
+
+/* layout of the response buffer written back for read chains */
+struct hinic_api_cmd_resp_fmt {
+	u64 header;	/* decoded via the HINIC_API_CMD_RESP_HEAD_* macros */
+	u64 rsvd[3];
+	u64 resp_data;
+};
+
+/* per-cell driver-side bookkeeping (not read by the HW) */
+struct hinic_api_cmd_cell_ctxt {
+	struct hinic_api_cmd_cell *cell_vaddr;	/* CPU address of the cell */
+
+	void *api_cmd_vaddr;	/* CPU address of the command buffer slice */
+
+	struct hinic_api_cmd_resp_fmt *resp;	/* CPU address of the response slice */
+
+	struct completion done;
+	int status;
+
+	u32 saved_prod_idx;
+};
+
+/* creation-time parameters for one API CMD chain */
+struct hinic_api_cmd_chain_attr {
+	struct hinic_hwdev *hwdev;
+	enum hinic_api_cmd_chain_type chain_type;
+
+	u32 num_cells;	/* must be a power of 2 */
+	u16 rsp_size;
+	u16 cell_size;
+};
+
+/* runtime state of one API CMD chain */
+struct hinic_api_cmd_chain {
+	struct hinic_hwdev *hwdev;
+	enum hinic_api_cmd_chain_type chain_type;
+
+	u32 num_cells;
+	u16 cell_size;
+	u16 rsp_size;
+
+	/* HW members is 24 bit format */
+	u32 prod_idx;
+	u32 cons_idx;
+
+	/* serializes senders on non-async chains */
+	struct semaphore sem;
+	/* Async cmd can not be scheduling */
+	spinlock_t async_lock;
+
+	/* DMA status block the HW writes back to */
+	dma_addr_t wb_status_paddr;
+	struct hinic_api_cmd_status *wb_status;
+
+	/* first cell of the ring */
+	dma_addr_t head_cell_paddr;
+	struct hinic_api_cmd_cell *head_node;
+
+	struct hinic_api_cmd_cell_ctxt *cell_ctxt;
+	struct hinic_api_cmd_cell *curr_node;
+
+	/* one contiguous DMA area: cells, then responses, then buffers */
+	struct hinic_dma_addr_align cells_addr;
+
+	u8 *cell_vaddr_base;
+	u64 cell_paddr_base;
+	u8 *rsp_vaddr_base;
+	u64 rsp_paddr_base;
+	u8 *buf_vaddr_base;
+	u64 buf_paddr_base;
+	u64 cell_size_align;	/* aligned sizes used for slice offsets */
+	u64 rsp_size_align;
+	u64 buf_size_align;
+};
+
+/* send @size bytes of @cmd to node @dest over @chain */
+int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
+			enum hinic_node_id dest, void *cmd, u16 size);
+
+/* send @cmd to node @dest and receive up to @ack_size bytes into @ack */
+int hinic_api_cmd_read(struct hinic_api_cmd_chain *chain,
+		       enum hinic_node_id dest, void *cmd, u16 size,
+		       void *ack, u16 ack_size);
+
+/* create one chain per hinic_api_cmd_chain_type into the @chain array */
+int hinic_api_cmd_init(struct hinic_hwdev *hwdev,
+		       struct hinic_api_cmd_chain **chain);
+
+/* destroy every chain created by hinic_api_cmd_init() */
+void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cfg.c b/drivers/net/ethernet/huawei/hinic/hinic_cfg.c
new file mode 100644
index 000000000000..0c3d362a78de
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cfg.c
@@ -0,0 +1,2478 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/vmalloc.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwif.h"
+#include "hinic_mbox.h"
+#include "hinic_cfg.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_multi_host_mgmt.h"
+
+/* RDMA resource sizing overrides; 0 means use the default value */
+uint g_rdma_mtts_num;
+uint g_rdma_qps_num;
+uint g_rdma_mpts_num;
+/* number of VFs to use; 0 means use the default value */
+uint g_vfs_num;
+module_param(g_rdma_mtts_num, uint, 0444);
+MODULE_PARM_DESC(g_rdma_mtts_num, "number of roce used mtts, use default value when pass 0");
+module_param(g_rdma_qps_num, uint, 0444);
+MODULE_PARM_DESC(g_rdma_qps_num, "number of roce used qps, use default value when pass 0");
+module_param(g_rdma_mpts_num, uint, 0444);
+MODULE_PARM_DESC(g_rdma_mpts_num, "number of roce used mpts, use default value when pass 0");
+module_param(g_vfs_num, uint, 0444);
+MODULE_PARM_DESC(g_vfs_num, "number of used vfs, use default value when pass 0 ");
+
+static uint intr_mode;
+
+/* test/bring-up knobs, copied into svc_cap by set_cfg_test_param() */
+uint timer_enable = 1;
+uint bloomfilter_enable;
+uint g_test_qpc_num;
+uint g_test_qpc_resvd_num;
+uint g_test_pagesize_reorder;
+uint g_test_xid_alloc_mode = 1;
+uint g_test_gpa_check_enable = 1;
+uint g_test_qpc_alloc_mode = 2;
+uint g_test_scqc_alloc_mode = 2;
+uint g_test_max_conn;
+uint g_test_max_cache_conn;
+uint g_test_scqc_num;
+uint g_test_mpt_num;
+uint g_test_mpt_resvd;
+uint g_test_scq_resvd;
+uint g_test_hash_num;
+uint g_test_reorder_num;
+
+/* copy the module-parameter test knobs into the service capability */
+static void set_cfg_test_param(struct cfg_mgmt_info *cfg_mgmt)
+{
+	cfg_mgmt->svc_cap.timer_en = (u8)timer_enable;
+	cfg_mgmt->svc_cap.bloomfilter_en = (u8)bloomfilter_enable;
+	cfg_mgmt->svc_cap.test_qpc_num = g_test_qpc_num;
+	cfg_mgmt->svc_cap.test_qpc_resvd_num = g_test_qpc_resvd_num;
+	cfg_mgmt->svc_cap.test_page_size_reorder = g_test_pagesize_reorder;
+	cfg_mgmt->svc_cap.test_xid_alloc_mode = (bool)g_test_xid_alloc_mode;
+	cfg_mgmt->svc_cap.test_gpa_check_enable = (bool)g_test_gpa_check_enable;
+	cfg_mgmt->svc_cap.test_qpc_alloc_mode = (u8)g_test_qpc_alloc_mode;
+	cfg_mgmt->svc_cap.test_scqc_alloc_mode = (u8)g_test_scqc_alloc_mode;
+	cfg_mgmt->svc_cap.test_max_conn_num = g_test_max_conn;
+	cfg_mgmt->svc_cap.test_max_cache_conn_num = g_test_max_cache_conn;
+	cfg_mgmt->svc_cap.test_scqc_num = g_test_scqc_num;
+	cfg_mgmt->svc_cap.test_mpt_num = g_test_mpt_num;
+	cfg_mgmt->svc_cap.test_scq_resvd_num = g_test_scq_resvd;
+	cfg_mgmt->svc_cap.test_mpt_recvd_num = g_test_mpt_resvd;
+	cfg_mgmt->svc_cap.test_hash_num = g_test_hash_num;
+	cfg_mgmt->svc_cap.test_reorder_num = g_test_reorder_num;
+}
+
+/**
+ * hinic_sync_time - synchronously push a timestamp to the mgmt cpu
+ * @hwdev: the pointer to hw device
+ * @time: time value in milliseconds to report
+ * Return: 0 - success, -EFAULT - mgmt reported an error or no response
+ */
+int hinic_sync_time(void *hwdev, u64 time)
+{
+	struct hinic_sync_time_info time_info = {0};
+	u16 out_size = sizeof(time_info);
+	int err;
+
+	time_info.mstime = time;
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_SYNC_TIME, &time_info,
+				     sizeof(time_info), &time_info, &out_size,
+				     0);
+	/* any transport error, mgmt-side status or empty reply is a failure */
+	if (err || time_info.status || !out_size) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, time_info.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * hinic_sync_time_async - fire-and-forget timestamp push to the mgmt cpu
+ * @hwdev: the pointer to hw device
+ * @time: time value in milliseconds to report
+ */
+void hinic_sync_time_async(void *hwdev, u64 time)
+{
+	struct hinic_sync_time_info time_info = {0};
+
+	time_info.mstime = time;
+	/* asynchronous send: no status or response is checked */
+	hinic_msg_to_mgmt_async(hwdev, HINIC_MOD_COMM,
+				HINIC_MGMT_CMD_SYNC_TIME, &time_info,
+				sizeof(time_info));
+}
+
+/* derive the SF enable flags for this function type from the device cap */
+static void parse_sf_en_cap(struct service_cap *cap,
+			    struct hinic_dev_cap *dev_cap, enum func_type type)
+{
+	struct dev_sf_svc_attr *attr = &cap->sf_svc_attr;
+
+	switch (type) {
+	case TYPE_PPF:
+		/* For PPF's SF EN flag, we assign it in get_dynamic_res_cap().
+		 * we only save its VF's flag.
+		 */
+		attr->sf_en_vf = dev_cap->sf_en_vf;
+		break;
+	case TYPE_PF:
+		cap->sf_en = !!dev_cap->sf_en_pf;
+		attr->sf_en_vf = dev_cap->sf_en_vf;
+		break;
+	default:
+		/* VF gets SF_EN_VF from PPF/PF */
+		cap->sf_en = !!dev_cap->sf_en_vf;
+		attr->sf_en_vf = 0;
+		break;
+	}
+}
+
+/* fill the public (service-independent) capability fields from firmware */
+static void parse_pub_res_cap(struct hinic_hwdev *hwdev,
+			      struct service_cap *cap,
+			      struct hinic_dev_cap *dev_cap,
+			      enum func_type type)
+{
+	struct dev_sf_svc_attr *attr = &cap->sf_svc_attr;
+
+	cap->svc_type = dev_cap->svc_cap_en;
+	cap->chip_svc_type = cap->svc_type;
+
+	if (dev_cap->sf_svc_attr & SF_SVC_FT_BIT)
+		attr->ft_en = true;
+	else
+		attr->ft_en = false;
+
+	if (dev_cap->sf_svc_attr & SF_SVC_RDMA_BIT)
+		attr->rdma_en = true;
+	else
+		attr->rdma_en = false;
+
+	cap->host_id = dev_cap->host_id;
+	cap->ep_id = dev_cap->ep_id;
+
+	cap->max_cos_id = dev_cap->max_cos_id;
+	cap->cos_valid_bitmap = dev_cap->valid_cos_bitmap;
+	cap->er_id = dev_cap->er_id;
+	cap->port_id = dev_cap->port_id;
+	cap->force_up = dev_cap->force_up;
+
+	parse_sf_en_cap(cap, dev_cap, type);
+
+	/* PF/PPF */
+	if (type == TYPE_PF || type == TYPE_PPF) {
+		cap->max_vf = dev_cap->max_vf;
+		cap->pf_num = dev_cap->pf_num;
+		cap->pf_id_start = dev_cap->pf_id_start;
+		cap->vf_num = dev_cap->vf_num;
+		cap->vf_id_start = dev_cap->vf_id_start;
+
+		/* FC need max queue number, but max queue number info is in
+		 * l2nic cap, we also put max queue num info in public cap, so
+		 * FC can get correct max queue number info.
+		 */
+		cap->max_sqs = dev_cap->nic_max_sq + 1;
+		cap->max_rqs = dev_cap->nic_max_rq + 1;
+	} else {
+		cap->max_vf = 0;
+		cap->max_sqs = dev_cap->nic_max_sq;
+		cap->max_rqs = dev_cap->nic_max_rq;
+	}
+
+	cap->host_total_function = dev_cap->host_total_func;
+	cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val;
+	cap->max_connect_num = dev_cap->max_conn_num;
+	cap->max_stick2cache_num = dev_cap->max_stick2cache_num;
+	cap->bfilter_start_addr = dev_cap->max_bfilter_start_addr;
+	cap->bfilter_len = dev_cap->bfilter_len;
+	cap->hash_bucket_num = dev_cap->hash_bucket_num;
+	cap->dev_ver_info.cfg_file_ver = dev_cap->cfg_file_ver;
+	cap->net_port_mode = dev_cap->net_port_mode;
+
+	/* FC does not use VF */
+	if (cap->net_port_mode == CFG_NET_MODE_FC)
+		cap->max_vf = 0;
+
+	sdk_info(hwdev->dev_hdl, "Get public resource capbility, svc_cap_en: 0x%x\n",
+		 dev_cap->svc_cap_en);
+	sdk_info(hwdev->dev_hdl, "Host_id=0x%x, ep_id=0x%x, max_cos_id=0x%x, cos_bitmap=0x%x, er_id=0x%x, port_id=0x%x\n",
+		 cap->host_id, cap->ep_id,
+		 cap->max_cos_id, cap->cos_valid_bitmap,
+		 cap->er_id, cap->port_id);
+	sdk_info(hwdev->dev_hdl, "Host_total_function=0x%x, host_oq_id_mask_val=0x%x, net_port_mode=0x%x, max_vf=0x%x\n",
+		 cap->host_total_function, cap->host_oq_id_mask_val,
+		 cap->net_port_mode, cap->max_vf);
+
+	sdk_info(hwdev->dev_hdl, "Pf_num=0x%x, pf_id_start=0x%x, vf_num=0x%x, vf_id_start=0x%x\n",
+		 cap->pf_num, cap->pf_id_start,
+		 cap->vf_num, cap->vf_id_start);
+
+	/* Check parameters from firmware */
+	if (cap->max_sqs > HINIC_CFG_MAX_QP ||
+	    cap->max_rqs > HINIC_CFG_MAX_QP) {
+		sdk_info(hwdev->dev_hdl, "Number of qp exceed limit[1-%d]: sq: %d, rq: %d\n",
+			 HINIC_CFG_MAX_QP, cap->max_sqs, cap->max_rqs);
+		cap->max_sqs = HINIC_CFG_MAX_QP;
+		cap->max_rqs = HINIC_CFG_MAX_QP;
+	}
+}
+
+/* fill the host-shared (dynamic) resource capability fields from firmware */
+static void parse_dynamic_share_res_cap(struct hinic_hwdev *hwdev,
+					struct service_cap *cap,
+					struct hinic_dev_cap *dev_cap,
+					enum func_type type)
+{
+	struct host_shared_resource_cap *shared_cap = &cap->shared_res_cap;
+
+	cap->sf_en = !!dev_cap->host_sf_en;
+
+	shared_cap->host_pctxs = dev_cap->host_pctx_num;
+	shared_cap->host_cctxs = dev_cap->host_ccxt_num;
+	shared_cap->host_scqs = dev_cap->host_scq_num;
+	shared_cap->host_srqs = dev_cap->host_srq_num;
+	shared_cap->host_mpts = dev_cap->host_mpt_num;
+
+	sdk_info(hwdev->dev_hdl, "Dynamic share resource capbility, host_pctxs=0x%x, host_cctxs=0x%x, host_scqs=0x%x, host_srqs=0x%x, host_mpts=0x%x\n",
+		 shared_cap->host_pctxs, shared_cap->host_cctxs,
+		 shared_cap->host_scqs, shared_cap->host_srqs,
+		 shared_cap->host_mpts);
+}
+
+/* fill the L2 NIC service capability fields from firmware */
+static void parse_l2nic_res_cap(struct hinic_hwdev *hwdev,
+				struct service_cap *cap,
+				struct hinic_dev_cap *dev_cap,
+				enum func_type type)
+{
+	struct nic_service_cap *nic_cap = &cap->nic_cap;
+
+	/* PF/PPF */
+	if (type == TYPE_PF || type == TYPE_PPF) {
+		nic_cap->max_sqs = dev_cap->nic_max_sq + 1;
+		nic_cap->max_rqs = dev_cap->nic_max_rq + 1;
+		nic_cap->vf_max_sqs = dev_cap->nic_vf_max_sq + 1;
+		nic_cap->vf_max_rqs = dev_cap->nic_vf_max_rq + 1;
+		nic_cap->max_queue_allowed = 0;
+		nic_cap->dynamic_qp = 0;
+	} else {
+		nic_cap->max_sqs = dev_cap->nic_max_sq;
+		nic_cap->max_rqs = dev_cap->nic_max_rq;
+		nic_cap->vf_max_sqs = 0;
+		nic_cap->vf_max_rqs = 0;
+		nic_cap->max_queue_allowed = dev_cap->max_queue_allowed;
+		nic_cap->dynamic_qp = dev_cap->ovs_dq_en;
+	}
+
+	if (dev_cap->nic_lro_en)
+		nic_cap->lro_en = true;
+	else
+		nic_cap->lro_en = false;
+
+	nic_cap->lro_sz = dev_cap->nic_lro_sz;
+	nic_cap->tso_sz = dev_cap->nic_tso_sz;
+
+	sdk_info(hwdev->dev_hdl, "L2nic resource capbility, max_sqs=0x%x, max_rqs=0x%x, vf_max_sqs=0x%x, vf_max_rqs=0x%x, max_queue_allowed=0x%x\n",
+		 nic_cap->max_sqs, nic_cap->max_rqs,
+		 nic_cap->vf_max_sqs, nic_cap->vf_max_rqs,
+		 nic_cap->max_queue_allowed);
+
+	/* Check parameters from firmware */
+	if (nic_cap->max_sqs > HINIC_CFG_MAX_QP ||
+	    nic_cap->max_rqs > HINIC_CFG_MAX_QP) {
+		sdk_info(hwdev->dev_hdl, "Number of qp exceed limit[1-%d]: sq: %d, rq: %d\n",
+			 HINIC_CFG_MAX_QP, nic_cap->max_sqs, nic_cap->max_rqs);
+		nic_cap->max_sqs = HINIC_CFG_MAX_QP;
+		nic_cap->max_rqs = HINIC_CFG_MAX_QP;
+	}
+}
+
+/* Copy the RoCE section of the firmware capability reply into the cached
+ * service_cap.  num_cos is derived as max_cos_id + 1.  Per-VF quotas are
+ * only taken from firmware on PF/PPF; other function types get zeroes.
+ * When firmware reports zero QPs, built-in defaults are substituted.
+ */
+static void parse_roce_res_cap(struct hinic_hwdev *hwdev,
+ struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+{
+ struct dev_roce_svc_own_cap *roce_cap =
+ &cap->rdma_cap.dev_rdma_cap.roce_own_cap;
+
+ roce_cap->max_qps = dev_cap->roce_max_qp;
+ roce_cap->max_cqs = dev_cap->roce_max_cq;
+ roce_cap->max_srqs = dev_cap->roce_max_srq;
+ roce_cap->max_mpts = dev_cap->roce_max_mpt;
+ roce_cap->num_cos = dev_cap->max_cos_id + 1;
+
+ /* PF/PPF */
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ roce_cap->vf_max_qps = dev_cap->roce_vf_max_qp;
+ roce_cap->vf_max_cqs = dev_cap->roce_vf_max_cq;
+ roce_cap->vf_max_srqs = dev_cap->roce_vf_max_srq;
+ roce_cap->vf_max_mpts = dev_cap->roce_vf_max_mpt;
+ } else {
+ roce_cap->vf_max_qps = 0;
+ roce_cap->vf_max_cqs = 0;
+ roce_cap->vf_max_srqs = 0;
+ roce_cap->vf_max_mpts = 0;
+ }
+
+ /* cmtt/dmtt/wqe window boundaries are copied verbatim from firmware */
+ roce_cap->cmtt_cl_start = dev_cap->roce_cmtt_cl_start;
+ roce_cap->cmtt_cl_end = dev_cap->roce_cmtt_cl_end;
+ roce_cap->cmtt_cl_sz = dev_cap->roce_cmtt_cl_size;
+
+ roce_cap->dmtt_cl_start = dev_cap->roce_dmtt_cl_start;
+ roce_cap->dmtt_cl_end = dev_cap->roce_dmtt_cl_end;
+ roce_cap->dmtt_cl_sz = dev_cap->roce_dmtt_cl_size;
+
+ roce_cap->wqe_cl_start = dev_cap->roce_wqe_cl_start;
+ roce_cap->wqe_cl_end = dev_cap->roce_wqe_cl_end;
+ roce_cap->wqe_cl_sz = dev_cap->roce_wqe_cl_size;
+
+ sdk_info(hwdev->dev_hdl, "Get roce resource capbility\n");
+ sdk_info(hwdev->dev_hdl, "Max_qps=0x%x, max_cqs=0x%x, max_srqs=0x%x, max_mpts=0x%x\n",
+ roce_cap->max_qps, roce_cap->max_cqs,
+ roce_cap->max_srqs, roce_cap->max_mpts);
+
+ sdk_info(hwdev->dev_hdl, "Vf_max_qps=0x%x, vf_max_cqs=0x%x, vf_max_srqs= 0x%x, vf_max_mpts= 0x%x\n",
+ roce_cap->vf_max_qps, roce_cap->vf_max_cqs,
+ roce_cap->vf_max_srqs, roce_cap->vf_max_mpts);
+
+ sdk_info(hwdev->dev_hdl, "Cmtt_start=0x%x, cmtt_end=0x%x, cmtt_sz=0x%x\n",
+ roce_cap->cmtt_cl_start, roce_cap->cmtt_cl_end,
+ roce_cap->cmtt_cl_sz);
+
+ sdk_info(hwdev->dev_hdl, "Dmtt_start=0x%x, dmtt_end=0x%x, dmtt_sz=0x%x\n",
+ roce_cap->dmtt_cl_start, roce_cap->dmtt_cl_end,
+ roce_cap->dmtt_cl_sz);
+
+ sdk_info(hwdev->dev_hdl, "Wqe_start=0x%x, wqe_end=0x%x, wqe_sz=0x%x\n",
+ roce_cap->wqe_cl_start, roce_cap->wqe_cl_end,
+ roce_cap->wqe_cl_sz);
+
+ /* Firmware reported no RoCE QPs: fall back to built-in defaults */
+ if (roce_cap->max_qps == 0) {
+ roce_cap->max_qps = 1024;
+ roce_cap->max_cqs = 2048;
+ roce_cap->max_srqs = 1024;
+ roce_cap->max_mpts = 1024;
+
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ roce_cap->vf_max_qps = 512;
+ roce_cap->vf_max_cqs = 1024;
+ roce_cap->vf_max_srqs = 512;
+ roce_cap->vf_max_mpts = 512;
+ }
+ }
+}
+
+/* Copy the iWARP section of the firmware capability reply into the cached
+ * service_cap.  Mirrors parse_roce_res_cap(): per-VF quotas only on
+ * PF/PPF, and built-in defaults when firmware reports zero QPs.
+ */
+static void parse_iwarp_res_cap(struct hinic_hwdev *hwdev,
+ struct service_cap *cap,
+ struct hinic_dev_cap *dev_cap,
+ enum func_type type)
+
+{
+ struct dev_iwarp_svc_own_cap *iwarp_cap =
+ &cap->rdma_cap.dev_rdma_cap.iwarp_own_cap;
+
+ iwarp_cap->max_qps = dev_cap->iwarp_max_qp;
+ iwarp_cap->max_cqs = dev_cap->iwarp_max_cq;
+ iwarp_cap->max_mpts = dev_cap->iwarp_max_mpt;
+ iwarp_cap->num_cos = dev_cap->max_cos_id + 1;
+
+ /* PF/PPF */
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ iwarp_cap->vf_max_qps = dev_cap->iwarp_vf_max_qp;
+ iwarp_cap->vf_max_cqs = dev_cap->iwarp_vf_max_cq;
+ iwarp_cap->vf_max_mpts = dev_cap->iwarp_vf_max_mpt;
+ } else {
+ iwarp_cap->vf_max_qps = 0;
+ iwarp_cap->vf_max_cqs = 0;
+ iwarp_cap->vf_max_mpts = 0;
+ }
+
+ iwarp_cap->cmtt_cl_start = dev_cap->iwarp_cmtt_cl_start;
+ iwarp_cap->cmtt_cl_end = dev_cap->iwarp_cmtt_cl_end;
+ iwarp_cap->cmtt_cl_sz = dev_cap->iwarp_cmtt_cl_size;
+
+ iwarp_cap->dmtt_cl_start = dev_cap->iwarp_dmtt_cl_start;
+ iwarp_cap->dmtt_cl_end = dev_cap->iwarp_dmtt_cl_end;
+ iwarp_cap->dmtt_cl_sz = dev_cap->iwarp_dmtt_cl_size;
+
+ iwarp_cap->wqe_cl_start = dev_cap->iwarp_wqe_cl_start;
+ iwarp_cap->wqe_cl_end = dev_cap->iwarp_wqe_cl_end;
+ iwarp_cap->wqe_cl_sz = dev_cap->iwarp_wqe_cl_size;
+
+ /* NOTE(review): "iwrap" in the log text below is a typo for "iwarp" */
+ sdk_info(hwdev->dev_hdl, "Get iwrap resource capbility\n");
+ sdk_info(hwdev->dev_hdl, "Max_qps=0x%x, max_cqs=0x%x, max_mpts=0x%x\n",
+ iwarp_cap->max_qps, iwarp_cap->max_cqs,
+ iwarp_cap->max_mpts);
+ sdk_info(hwdev->dev_hdl, "Vf_max_qps=0x%x, vf_max_cqs=0x%x, vf_max_mpts=0x%x\n",
+ iwarp_cap->vf_max_qps, iwarp_cap->vf_max_cqs,
+ iwarp_cap->vf_max_mpts);
+
+ sdk_info(hwdev->dev_hdl, "Cmtt_start=0x%x, cmtt_end=0x%x, cmtt_sz=0x%x\n",
+ iwarp_cap->cmtt_cl_start, iwarp_cap->cmtt_cl_end,
+ iwarp_cap->cmtt_cl_sz);
+
+ sdk_info(hwdev->dev_hdl, "Dmtt_start=0x%x, dmtt_end=0x%x, dmtt_sz=0x%x\n",
+ iwarp_cap->dmtt_cl_start, iwarp_cap->dmtt_cl_end,
+ iwarp_cap->dmtt_cl_sz);
+
+ sdk_info(hwdev->dev_hdl, "Wqe_start=0x%x, wqe_end=0x%x, wqe_sz=0x%x\n",
+ iwarp_cap->wqe_cl_start, iwarp_cap->wqe_cl_end,
+ iwarp_cap->wqe_cl_sz);
+
+ /* Firmware reported no iWARP QPs: fall back to built-in defaults */
+ if (iwarp_cap->max_qps == 0) {
+ iwarp_cap->max_qps = 8;
+ iwarp_cap->max_cqs = 16;
+ iwarp_cap->max_mpts = 8;
+
+ if (type == TYPE_PF || type == TYPE_PPF) {
+ iwarp_cap->vf_max_qps = 8;
+ iwarp_cap->vf_max_cqs = 16;
+ iwarp_cap->vf_max_mpts = 8;
+ }
+ }
+}
+
+/* Copy the FCoE limits exactly as reported by firmware. */
+static void parse_fcoe_res_cap(struct hinic_hwdev *hwdev,
+			       struct service_cap *cap,
+			       struct hinic_dev_cap *dev_cap,
+			       enum func_type type)
+{
+	struct dev_fcoe_svc_cap *fcoe = &cap->fcoe_cap.dev_fcoe_cap;
+
+	fcoe->max_qps = dev_cap->fcoe_max_qp;
+	fcoe->max_cqs = dev_cap->fcoe_max_cq;
+	fcoe->max_srqs = dev_cap->fcoe_max_srq;
+	fcoe->max_cctxs = dev_cap->fcoe_max_cctx;
+	fcoe->cctxs_id_start = dev_cap->fcoe_cctx_id_start;
+	fcoe->vp_id_start = dev_cap->fcoe_vp_id_start;
+	fcoe->vp_id_end = dev_cap->fcoe_vp_id_end;
+
+	sdk_info(hwdev->dev_hdl, "Get fcoe resource capbility\n");
+	sdk_info(hwdev->dev_hdl, "Max_qps=0x%x, max_cqs=0x%x, max_srqs=0x%x, max_cctxs=0x%x, cctxs_id_start=0x%x\n",
+		 fcoe->max_qps, fcoe->max_cqs, fcoe->max_srqs,
+		 fcoe->max_cctxs, fcoe->cctxs_id_start);
+	sdk_info(hwdev->dev_hdl, "Vp_id_start=0x%x, vp_id_end=0x%x\n",
+		 fcoe->vp_id_start, fcoe->vp_id_end);
+}
+
+/* Copy the TOE limits exactly as reported by firmware. */
+static void parse_toe_res_cap(struct hinic_hwdev *hwdev,
+			      struct service_cap *cap,
+			      struct hinic_dev_cap *dev_cap,
+			      enum func_type type)
+{
+	struct dev_toe_svc_cap *toe = &cap->toe_cap.dev_toe_cap;
+
+	toe->max_pctxs = dev_cap->toe_max_pctx;
+	toe->max_cqs = dev_cap->toe_max_cq;
+	toe->max_srqs = dev_cap->toe_max_srq;
+	toe->srq_id_start = dev_cap->toe_srq_id_start;
+	toe->num_cos = dev_cap->max_cos_id + 1;
+
+	sdk_info(hwdev->dev_hdl, "Get toe resource capbility, max_pctxs=0x%x, max_cqs=0x%x, max_srqs=0x%x, srq_id_start=0x%x\n",
+		 toe->max_pctxs, toe->max_cqs, toe->max_srqs,
+		 toe->srq_id_start);
+}
+
+/* Copy the FC limits exactly as reported by firmware. */
+static void parse_fc_res_cap(struct hinic_hwdev *hwdev,
+			     struct service_cap *cap,
+			     struct hinic_dev_cap *dev_cap,
+			     enum func_type type)
+{
+	struct dev_fc_svc_cap *fc = &cap->fc_cap.dev_fc_cap;
+
+	fc->max_parent_qpc_num = dev_cap->fc_max_pctx;
+	fc->scq_num = dev_cap->fc_max_scq;
+	fc->srq_num = dev_cap->fc_max_srq;
+	fc->max_child_qpc_num = dev_cap->fc_max_cctx;
+	fc->child_qpc_id_start = dev_cap->fc_cctx_id_start;
+	fc->vp_id_start = dev_cap->fc_vp_id_start;
+	fc->vp_id_end = dev_cap->fc_vp_id_end;
+
+	sdk_info(hwdev->dev_hdl, "Get fc resource capbility\n");
+	sdk_info(hwdev->dev_hdl, "Max_parent_qpc_num=0x%x, scq_num=0x%x, srq_num=0x%x, max_child_qpc_num=0x%x, child_qpc_id_start=0x%x\n",
+		 fc->max_parent_qpc_num, fc->scq_num, fc->srq_num,
+		 fc->max_child_qpc_num, fc->child_qpc_id_start);
+	sdk_info(hwdev->dev_hdl, "Vp_id_start=0x%x, vp_id_end=0x%x\n",
+		 fc->vp_id_start, fc->vp_id_end);
+}
+
+/* Copy the OVS limits from the firmware capability reply. */
+static void parse_ovs_res_cap(struct hinic_hwdev *hwdev,
+			      struct service_cap *cap,
+			      struct hinic_dev_cap *dev_cap,
+			      enum func_type type)
+{
+	struct dev_ovs_svc_cap *ovs = &cap->ovs_cap.dev_ovs_cap;
+
+	ovs->max_pctxs = dev_cap->ovs_max_qpc;
+	ovs->max_cqs = 0;
+
+	/* Dynamic queue enable is only taken from firmware on PF/PPF. */
+	if (type == TYPE_PF || type == TYPE_PPF)
+		ovs->dynamic_qp_en = dev_cap->ovs_dq_en;
+
+	sdk_info(hwdev->dev_hdl, "Get ovs resource capbility, max_qpc: 0x%x\n",
+		 ovs->max_pctxs);
+}
+
+/* ACL limits are fixed by the driver, not read from firmware;
+ * dev_cap and type are accepted only for signature symmetry.
+ */
+static void parse_acl_res_cap(struct service_cap *cap,
+			      struct hinic_dev_cap *dev_cap,
+			      enum func_type type)
+{
+	struct acl_service_cap *acl = &cap->acl_cap;
+
+	acl->dev_acl_cap.max_pctxs = 1024 * 1024;
+	acl->dev_acl_cap.max_cqs = 8;
+}
+
+/* Dispatch the firmware capability reply to the per-service parsers
+ * enabled for this chip and function type.
+ */
+static void parse_dev_cap(struct hinic_hwdev *dev,
+			  struct hinic_dev_cap *dev_cap, enum func_type type)
+{
+	struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+
+	/* Capabilities common to every function type */
+	parse_pub_res_cap(dev, cap, dev_cap, type);
+
+	/* Dynamic shared resources are managed by the PPF only */
+	if (type == TYPE_PPF)
+		parse_dynamic_share_res_cap(dev, cap, dev_cap, type);
+
+	if (IS_NIC_TYPE(dev))
+		parse_l2nic_res_cap(dev, cap, dev_cap, type);
+
+	/* Storage services (FC/FCoE/TOE) are not virtualized */
+	if (type == TYPE_PF || type == TYPE_PPF) {
+		if (IS_FC_TYPE(dev))
+			parse_fc_res_cap(dev, cap, dev_cap, type);
+
+		if (IS_FCOE_TYPE(dev))
+			parse_fcoe_res_cap(dev, cap, dev_cap, type);
+
+		if (IS_TOE_TYPE(dev))
+			parse_toe_res_cap(dev, cap, dev_cap, type);
+	}
+
+	if (IS_ROCE_TYPE(dev))
+		parse_roce_res_cap(dev, cap, dev_cap, type);
+
+	if (IS_IWARP_TYPE(dev))
+		parse_iwarp_res_cap(dev, cap, dev_cap, type);
+
+	if (IS_OVS_TYPE(dev))
+		parse_ovs_res_cap(dev, cap, dev_cap, type);
+
+	if (IS_ACL_TYPE(dev))
+		parse_acl_res_cap(cap, dev_cap, type);
+}
+
+/* PF/PPF path: query the management firmware for this function's
+ * capabilities and parse them into the cached service_cap.
+ */
+static int get_cap_from_fw(struct hinic_hwdev *dev, enum func_type type)
+{
+	struct hinic_dev_cap fw_cap = {0};
+	u16 reply_len = sizeof(fw_cap);
+	int err;
+
+	fw_cap.version = HINIC_CMD_VER_FUNC_ID;
+	err = hinic_global_func_id_get(dev, &fw_cap.func_id);
+	if (err)
+		return err;
+
+	sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %d\n",
+		 fw_cap.func_id);
+
+	/* Request and reply share the same buffer. */
+	err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_NIC_CAP,
+				     &fw_cap, sizeof(fw_cap), &fw_cap,
+				     &reply_len, 0);
+	if (err || fw_cap.status || !reply_len) {
+		sdk_err(dev->dev_hdl,
+			"Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, fw_cap.status, reply_len);
+		return -EFAULT;
+	}
+
+	parse_dev_cap(dev, &fw_cap, type);
+	return 0;
+}
+
+/* VF path: ask the owning PF (via mailbox) for this VF's capabilities
+ * and parse them into the cached service_cap.
+ */
+static int get_cap_from_pf(struct hinic_hwdev *dev, enum func_type type)
+{
+	struct hinic_dev_cap pf_cap = {0};
+	u16 reply_len = sizeof(pf_cap);
+	int err;
+
+	err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_MBOX_CAP,
+				     &pf_cap, sizeof(pf_cap), &pf_cap,
+				     &reply_len, 0);
+	if (err || pf_cap.status || !reply_len) {
+		sdk_err(dev->dev_hdl, "Failed to get capability from PF, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, pf_cap.status, reply_len);
+		return -EFAULT;
+	}
+
+	parse_dev_cap(dev, &pf_cap, type);
+	return 0;
+}
+
+/* Fetch device capabilities from the source appropriate to the function
+ * type: management firmware for PF/PPF, the owning PF for a VF.
+ */
+static int get_dev_cap(struct hinic_hwdev *dev)
+{
+	enum func_type type = HINIC_FUNC_TYPE(dev);
+	int err;
+
+	switch (type) {
+	case TYPE_PF:
+	case TYPE_PPF:
+		err = get_cap_from_fw(dev, type);
+		if (err)
+			sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n");
+		return err;
+	case TYPE_VF:
+		err = get_cap_from_pf(dev, type);
+		if (err)
+			sdk_err(dev->dev_hdl, "Failed to get VF capability\n");
+		return err;
+	default:
+		sdk_err(dev->dev_hdl, "Unsupported PCI Function type: %d\n",
+			type);
+		return -EINVAL;
+	}
+}
+
+/* On a VF, shrink the NIC queue counts to the quota its PF allowed. */
+static void nic_param_fix(struct hinic_hwdev *dev)
+{
+	struct nic_service_cap *nic = &dev->cfg_mgmt->svc_cap.nic_cap;
+
+	if (hinic_func_type(dev) != TYPE_VF || nic->max_queue_allowed == 0)
+		return;
+
+	nic->max_rqs = nic->max_queue_allowed;
+	nic->max_sqs = nic->max_queue_allowed;
+}
+
+/* Fill in the RDMA capability fields that are fixed by the driver rather
+ * than reported by firmware.  The protocol-specific block is chosen by
+ * chip service type; the else branch assumes a non-RoCE RDMA chip is
+ * iWARP.  num_mtts may be overridden by g_rdma_mtts_num when non-zero
+ * (presumably a module parameter — confirm where it is declared).
+ */
+static void rdma_param_fix(struct hinic_hwdev *dev)
+{
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+ struct rdma_service_cap *rdma_cap = &cap->rdma_cap;
+ struct dev_roce_svc_own_cap *roce_cap =
+ &rdma_cap->dev_rdma_cap.roce_own_cap;
+ struct dev_iwarp_svc_own_cap *iwarp_cap =
+ &rdma_cap->dev_rdma_cap.iwarp_own_cap;
+
+ rdma_cap->log_mtt = LOG_MTT_SEG;
+ rdma_cap->log_rdmarc = LOG_RDMARC_SEG;
+ rdma_cap->reserved_qps = RDMA_RSVD_QPS;
+ rdma_cap->max_sq_sg = RDMA_MAX_SQ_SGE;
+
+ /* RoCE */
+ if (IS_ROCE_TYPE(dev)) {
+ roce_cap->qpc_entry_sz = ROCE_QPC_ENTRY_SZ;
+ roce_cap->max_wqes = ROCE_MAX_WQES;
+ roce_cap->max_rq_sg = ROCE_MAX_RQ_SGE;
+ roce_cap->max_sq_inline_data_sz = ROCE_MAX_SQ_INLINE_DATA_SZ;
+ roce_cap->max_rq_desc_sz = ROCE_MAX_RQ_DESC_SZ;
+ roce_cap->rdmarc_entry_sz = ROCE_RDMARC_ENTRY_SZ;
+ roce_cap->max_qp_init_rdma = ROCE_MAX_QP_INIT_RDMA;
+ roce_cap->max_qp_dest_rdma = ROCE_MAX_QP_DEST_RDMA;
+ roce_cap->max_srq_wqes = ROCE_MAX_SRQ_WQES;
+ roce_cap->reserved_srqs = ROCE_RSVD_SRQS;
+ roce_cap->max_srq_sge = ROCE_MAX_SRQ_SGE;
+ roce_cap->srqc_entry_sz = ROCE_SRQC_ENTERY_SZ;
+ roce_cap->max_msg_sz = ROCE_MAX_MSG_SZ;
+ } else {
+ /* non-RoCE RDMA chip: iWARP constants */
+ iwarp_cap->qpc_entry_sz = IWARP_QPC_ENTRY_SZ;
+ iwarp_cap->max_wqes = IWARP_MAX_WQES;
+ iwarp_cap->max_rq_sg = IWARP_MAX_RQ_SGE;
+ iwarp_cap->max_sq_inline_data_sz = IWARP_MAX_SQ_INLINE_DATA_SZ;
+ iwarp_cap->max_rq_desc_sz = IWARP_MAX_RQ_DESC_SZ;
+ iwarp_cap->max_irq_depth = IWARP_MAX_IRQ_DEPTH;
+ iwarp_cap->irq_entry_size = IWARP_IRQ_ENTRY_SZ;
+ iwarp_cap->max_orq_depth = IWARP_MAX_ORQ_DEPTH;
+ iwarp_cap->orq_entry_size = IWARP_ORQ_ENTRY_SZ;
+ iwarp_cap->max_rtoq_depth = IWARP_MAX_RTOQ_DEPTH;
+ iwarp_cap->rtoq_entry_size = IWARP_RTOQ_ENTRY_SZ;
+ iwarp_cap->max_ackq_depth = IWARP_MAX_ACKQ_DEPTH;
+ iwarp_cap->ackq_entry_size = IWARP_ACKQ_ENTRY_SZ;
+ iwarp_cap->max_msg_sz = IWARP_MAX_MSG_SZ;
+ }
+
+ /* Fields shared by both RDMA flavours */
+ rdma_cap->max_sq_desc_sz = RDMA_MAX_SQ_DESC_SZ;
+ rdma_cap->wqebb_size = WQEBB_SZ;
+ rdma_cap->max_cqes = RDMA_MAX_CQES;
+ rdma_cap->reserved_cqs = RDMA_RSVD_CQS;
+ rdma_cap->cqc_entry_sz = RDMA_CQC_ENTRY_SZ;
+ rdma_cap->cqe_size = RDMA_CQE_SZ;
+ rdma_cap->reserved_mrws = RDMA_RSVD_MRWS;
+ rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ;
+
+ /* 2^8 - 1
+ * +------------------------+-----------+
+ * | 4B | 1M(20b) | Key(8b) |
+ * +------------------------+-----------+
+ * key = 8bit key + 24bit index,
+ * now Lkey of SGE uses 2bit(bit31 and bit30), so key only have 10bit,
+ * we use original 8bits directly for simpilification
+ */
+ rdma_cap->max_fmr_maps = 255;
+ rdma_cap->num_mtts = (g_rdma_mtts_num > 0 ?
+ g_rdma_mtts_num : RDMA_NUM_MTTS);
+ rdma_cap->log_mtt_seg = LOG_MTT_SEG;
+ rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ;
+ rdma_cap->log_rdmarc_seg = LOG_RDMARC_SEG;
+ rdma_cap->local_ca_ack_delay = LOCAL_ACK_DELAY;
+ rdma_cap->num_ports = RDMA_NUM_PORTS;
+ rdma_cap->db_page_size = DB_PAGE_SZ;
+ rdma_cap->direct_wqe_size = DWQE_SZ;
+ rdma_cap->num_pds = NUM_PD;
+ rdma_cap->reserved_pds = RSVD_PD;
+ rdma_cap->max_xrcds = MAX_XRCDS;
+ rdma_cap->reserved_xrcds = RSVD_XRCDS;
+ rdma_cap->max_gid_per_port = MAX_GID_PER_PORT;
+ rdma_cap->gid_entry_sz = GID_ENTRY_SZ;
+ rdma_cap->reserved_lkey = RSVD_LKEY;
+ rdma_cap->num_comp_vectors = (u32)dev->cfg_mgmt->eq_info.num_ceq;
+ rdma_cap->page_size_cap = PAGE_SZ_CAP;
+ rdma_cap->flags = (RDMA_BMME_FLAG_LOCAL_INV |
+ RDMA_BMME_FLAG_REMOTE_INV |
+ RDMA_BMME_FLAG_FAST_REG_WR |
+ RDMA_DEV_CAP_FLAG_XRC |
+ RDMA_DEV_CAP_FLAG_MEM_WINDOW |
+ RDMA_BMME_FLAG_TYPE_2_WIN |
+ RDMA_BMME_FLAG_WIN_TYPE_2B |
+ RDMA_DEV_CAP_FLAG_ATOMIC);
+ rdma_cap->max_frpl_len = MAX_FRPL_LEN;
+ rdma_cap->max_pkeys = MAX_PKEYS;
+}
+
+/* Set the FCoE context and queue element sizes, which are compile-time
+ * constants rather than firmware-reported values.
+ */
+static void fcoe_param_fix(struct hinic_hwdev *dev)
+{
+	struct fcoe_service_cap *fcoe = &dev->cfg_mgmt->svc_cap.fcoe_cap;
+
+	fcoe->qpc_basic_size = FCOE_PCTX_SZ;
+	fcoe->childc_basic_size = FCOE_CCTX_SZ;
+	fcoe->sqe_size = FCOE_SQE_SZ;
+	fcoe->scqc_basic_size = FCOE_SCQC_SZ;
+	fcoe->scqe_size = FCOE_SCQE_SZ;
+	fcoe->srqc_size = FCOE_SRQC_SZ;
+	fcoe->srqe_size = FCOE_SRQE_SZ;
+}
+
+/* Set the driver-fixed TOE context sizes. */
+static void toe_param_fix(struct hinic_hwdev *dev)
+{
+	struct toe_service_cap *toe = &dev->cfg_mgmt->svc_cap.toe_cap;
+
+	toe->pctx_sz = TOE_PCTX_SZ;
+	toe->scqc_sz = TOE_CQC_SZ;
+}
+
+/* Set the driver-fixed FC context and queue element sizes. */
+static void fc_param_fix(struct hinic_hwdev *dev)
+{
+	struct fc_service_cap *fc = &dev->cfg_mgmt->svc_cap.fc_cap;
+
+	fc->parent_qpc_size = FC_PCTX_SZ;
+	fc->child_qpc_size = FC_CCTX_SZ;
+	fc->sqe_size = FC_SQE_SZ;
+	fc->scqc_size = FC_SCQC_SZ;
+	fc->scqe_size = FC_SCQE_SZ;
+	fc->srqc_size = FC_SRQC_SZ;
+	fc->srqe_size = FC_SRQE_SZ;
+}
+
+/* Set the driver-fixed OVS context sizes. */
+static void ovs_param_fix(struct hinic_hwdev *dev)
+{
+	struct ovs_service_cap *ovs = &dev->cfg_mgmt->svc_cap.ovs_cap;
+
+	ovs->pctx_sz = OVS_PCTX_SZ;
+	ovs->scqc_sz = OVS_SCQC_SZ;
+}
+
+/* Set the driver-fixed ACL context sizes. */
+static void acl_param_fix(struct hinic_hwdev *dev)
+{
+	struct acl_service_cap *acl = &dev->cfg_mgmt->svc_cap.acl_cap;
+
+	acl->pctx_sz = ACL_PCTX_SZ;
+	acl->scqc_sz = ACL_SCQC_SZ;
+}
+
+/* Apply the driver-side parameter fixups for every service type this
+ * chip supports.
+ */
+static void init_service_param(struct hinic_hwdev *dev)
+{
+	if (IS_NIC_TYPE(dev))
+		nic_param_fix(dev);
+	if (IS_RDMA_TYPE(dev))
+		rdma_param_fix(dev);
+	if (IS_FCOE_TYPE(dev))
+		fcoe_param_fix(dev);
+	if (IS_TOE_TYPE(dev))
+		toe_param_fix(dev);
+	if (IS_FC_TYPE(dev))
+		fc_param_fix(dev);
+	if (IS_OVS_TYPE(dev))
+		ovs_param_fix(dev);
+	if (IS_ACL_TYPE(dev))
+		acl_param_fix(dev);
+}
+
+/* Record the hardware CEQ count; all queues start out unallocated. */
+static void cfg_get_eq_num(struct hinic_hwdev *dev)
+{
+	struct cfg_eq_info *eqs = &dev->cfg_mgmt->eq_info;
+
+	eqs->num_ceq = dev->hwif->attr.num_ceqs;
+	eqs->num_ceq_remain = eqs->num_ceq;
+}
+
+/* Allocate and initialize the CEQ bookkeeping table: every queue starts
+ * free and bound to no service.
+ */
+static int cfg_init_eq(struct hinic_hwdev *dev)
+{
+	struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+	struct cfg_eq *ceqs;
+	u8 i, num_ceq;
+
+	cfg_get_eq_num(dev);
+	num_ceq = cfg_mgmt->eq_info.num_ceq;
+
+	sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n",
+		 cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain);
+
+	if (!num_ceq) {
+		sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n");
+		return -EFAULT;
+	}
+
+	ceqs = kcalloc(num_ceq, sizeof(*ceqs), GFP_KERNEL);
+	if (!ceqs)
+		return -ENOMEM;
+
+	for (i = 0; i < num_ceq; i++) {
+		ceqs[i].eqn = i;
+		ceqs[i].free = CFG_FREE;
+		ceqs[i].type = SERVICE_T_MAX;
+	}
+
+	cfg_mgmt->eq_info.eq = ceqs;
+	mutex_init(&cfg_mgmt->eq_info.eq_mutex);
+
+	return 0;
+}
+
+/* Return a copy of the cached device version information in @ver. */
+int hinic_dev_ver_info(void *hwdev, struct dev_version_info *ver)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	if (!dev || !ver)
+		return -EINVAL;
+
+	memcpy(ver, &dev->cfg_mgmt->svc_cap.dev_ver_info, sizeof(*ver));
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_dev_ver_info);
+
+/* Map an RDMA (RoCE/iWARP) completion vector to the CEQ number bound to
+ * it.  The vector is folded into
+ * [CFG_RDMA_CEQ_BASE, CFG_RDMA_CEQ_BASE + num_ceq) before the lookup.
+ * Returns the eqn, or -EINVAL for bad arguments, non-RDMA service types,
+ * or a slot that is not a busy RDMA queue.
+ */
+int hinic_vector_to_eqn(void *hwdev, enum hinic_service_type type, int vector)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_eq *eq;
+ int eqn = -EINVAL;
+
+ if (!hwdev || vector < 0)
+ return -EINVAL;
+
+ if (type != SERVICE_T_ROCE && type != SERVICE_T_IWARP) {
+ sdk_err(dev->dev_hdl,
+ "Service type: %d, only RDMA service could get eqn by vector\n",
+ type);
+ return -EINVAL;
+ }
+
+ /* NOTE(review): eq[] holds num_ceq entries but this index can reach
+ * num_ceq + CFG_RDMA_CEQ_BASE - 1; relies on the base offset being
+ * accounted for elsewhere — verify bounds.
+ */
+ cfg_mgmt = dev->cfg_mgmt;
+ vector = (vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE;
+
+ eq = cfg_mgmt->eq_info.eq;
+ if ((eq[vector].type == SERVICE_T_ROCE ||
+ eq[vector].type == SERVICE_T_IWARP) &&
+ eq[vector].free == CFG_BUSY)
+ eqn = eq[vector].eqn;
+
+ return eqn;
+}
+EXPORT_SYMBOL(hinic_vector_to_eqn);
+
+/* Allocate the interrupt bookkeeping table sized from the hardware
+ * attribute block and choose the interrupt type for this function.
+ */
+static int cfg_init_interrupt(struct hinic_hwdev *dev)
+{
+	struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+	struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info;
+	u16 nr_irqs = dev->hwif->attr.num_irqs;
+
+	if (!nr_irqs) {
+		sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n");
+		return -EFAULT;
+	}
+
+	irq_info->alloc_info = kcalloc(nr_irqs, sizeof(*irq_info->alloc_info),
+				       GFP_KERNEL);
+	if (!irq_info->alloc_info)
+		return -ENOMEM;
+
+	irq_info->num_irq_hw = nr_irqs;
+
+	/* Production requires that a VF only supports MSI-X. */
+	cfg_mgmt->svc_cap.interrupt_type =
+		(HINIC_FUNC_TYPE(dev) == TYPE_VF) ? INTR_TYPE_MSIX : intr_mode;
+
+	mutex_init(&irq_info->irq_mutex);
+
+	return 0;
+}
+
+/* Enable the interrupts counted by cfg_init_interrupt().  For MSI-X the
+ * msix_entry table is only needed while calling pci_enable_msix_range();
+ * the per-vector ids are copied into alloc_info[] and the table is freed.
+ * Receiving fewer vectors than requested (but >= VECTOR_THRESHOLD) is
+ * accepted, and num_total/num_irq_remain reflect the actual count.
+ */
+static int cfg_enable_interrupt(struct hinic_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+ u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw;
+
+ void *pcidev = dev->pcidev_hdl;
+ struct irq_alloc_info_st *irq_info;
+ struct msix_entry *entry;
+ u16 i = 0;
+ int actual_irq;
+
+ irq_info = cfg_mgmt->irq_param_info.alloc_info;
+
+ sdk_info(dev->dev_hdl, "Interrupt type: %d, irq num: %d\n",
+ cfg_mgmt->svc_cap.interrupt_type, nreq);
+
+ switch (cfg_mgmt->svc_cap.interrupt_type) {
+ case INTR_TYPE_MSIX:
+ if (!nreq) {
+ sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n");
+ return -EINVAL;
+ }
+ entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ for (i = 0; i < nreq; i++)
+ entry[i].entry = i;
+
+ actual_irq = pci_enable_msix_range(pcidev, entry,
+ VECTOR_THRESHOLD, nreq);
+ if (actual_irq < 0) {
+ /* NOTE(review): "threshold 2" hardcodes the value of
+ * VECTOR_THRESHOLD in the message — keep in sync.
+ */
+ sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed\n");
+ kfree(entry);
+ return -ENOMEM;
+ }
+
+ nreq = (u16)actual_irq;
+ cfg_mgmt->irq_param_info.num_total = nreq;
+ cfg_mgmt->irq_param_info.num_irq_remain = nreq;
+ sdk_info(dev->dev_hdl, "Request %d msix vector success\n",
+ nreq);
+
+ /* Copy out the kernel-assigned vectors; all slots start free. */
+ for (i = 0; i < nreq; ++i) {
+ /* u16 driver uses to specify entry, OS writes */
+ irq_info[i].info.msix_entry_idx = entry[i].entry;
+ /* u32 kernel uses to write allocated vector */
+ irq_info[i].info.irq_id = entry[i].vector;
+ irq_info[i].type = SERVICE_T_MAX;
+ irq_info[i].free = CFG_FREE;
+ }
+
+ kfree(entry);
+
+ break;
+
+ default:
+ /* NOTE(review): non-MSI-X types are only logged and the
+ * function still returns 0 — confirm this is intentional.
+ */
+ sdk_err(dev->dev_hdl, "Unsupport interrupt type %d\n",
+ cfg_mgmt->svc_cap.interrupt_type);
+ break;
+ }
+
+ return 0;
+}
+
+/* Reserve up to @num free interrupts for service @type under irq_mutex.
+ * The msix entry index and irq id of each reserved slot are written to
+ * @irq_info_array and *@act_num is set to the number actually reserved,
+ * which may be smaller than requested when resources run short.
+ */
+int hinic_alloc_irqs(void *hwdev, enum hinic_service_type type, u16 num,
+ struct irq_info *irq_info_array, u16 *act_num)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_irq_info *irq_info;
+ struct irq_alloc_info_st *alloc_info;
+ int max_num_irq;
+ u16 free_num_irq;
+ int i, j;
+
+ if (!hwdev || !irq_info_array || !act_num)
+ return -EINVAL;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ irq_info = &cfg_mgmt->irq_param_info;
+ alloc_info = irq_info->alloc_info;
+ max_num_irq = irq_info->num_total;
+ free_num_irq = irq_info->num_irq_remain;
+
+ mutex_lock(&irq_info->irq_mutex);
+
+ /* Shrink the request to what is currently available. */
+ if (num > free_num_irq) {
+ if (free_num_irq == 0) {
+ sdk_err(dev->dev_hdl,
+ "no free irq resource in cfg mgmt\n");
+ mutex_unlock(&irq_info->irq_mutex);
+ return -ENOMEM;
+ }
+
+ sdk_warn(dev->dev_hdl, "only %d irq resource in cfg mgmt\n",
+ free_num_irq);
+ num = free_num_irq;
+ }
+
+ *act_num = 0;
+
+ for (i = 0; i < num; i++) {
+ /* linear scan for the first free slot */
+ for (j = 0; j < max_num_irq; j++) {
+ if (alloc_info[j].free == CFG_FREE) {
+ if (irq_info->num_irq_remain == 0) {
+ sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n");
+ mutex_unlock(&irq_info->irq_mutex);
+ return -EINVAL;
+ }
+ alloc_info[j].type = type;
+ alloc_info[j].free = CFG_BUSY;
+
+ irq_info_array[i].msix_entry_idx =
+ alloc_info[j].info.msix_entry_idx;
+ irq_info_array[i].irq_id =
+ alloc_info[j].info.irq_id;
+ (*act_num)++;
+ irq_info->num_irq_remain--;
+
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&irq_info->irq_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(hinic_alloc_irqs);
+
+/* Return the interrupt @irq_id (previously reserved for service @type)
+ * to the free pool.  An id that does not match a busy entry of the given
+ * type only triggers a warning.
+ */
+void hinic_free_irq(void *hwdev, enum hinic_service_type type, u32 irq_id)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_irq_info *irq_info;
+ struct irq_alloc_info_st *alloc_info;
+ int max_num_irq;
+ int i;
+
+ if (!hwdev)
+ return;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ irq_info = &cfg_mgmt->irq_param_info;
+ alloc_info = irq_info->alloc_info;
+ max_num_irq = irq_info->num_total;
+
+ mutex_lock(&irq_info->irq_mutex);
+
+ for (i = 0; i < max_num_irq; i++) {
+ if (irq_id == alloc_info[i].info.irq_id &&
+ type == alloc_info[i].type) {
+ if (alloc_info[i].free == CFG_BUSY) {
+ alloc_info[i].free = CFG_FREE;
+ irq_info->num_irq_remain++;
+ /* defensive: the free count can never exceed the total */
+ if (irq_info->num_irq_remain > max_num_irq) {
+ sdk_err(dev->dev_hdl, "Find target, but over range\n");
+ mutex_unlock(&irq_info->irq_mutex);
+ return;
+ }
+ break;
+ }
+ }
+ }
+
+ /* loop ran to completion: nothing matched or entry already free */
+ if (i >= max_num_irq)
+ sdk_warn(dev->dev_hdl, "Irq %d don't need to free\n", irq_id);
+
+ mutex_unlock(&irq_info->irq_mutex);
+}
+EXPORT_SYMBOL(hinic_free_irq);
+
+/* Map an RDMA (RoCE/iWARP) completion vector to the kernel irq id bound
+ * to it.  The vector is folded into
+ * [CFG_RDMA_CEQ_BASE, CFG_RDMA_CEQ_BASE + num_ceq) before the lookup.
+ * Returns the irq id, or -EINVAL for bad arguments, non-RDMA service
+ * types, or a slot that is not a busy RDMA entry.
+ */
+int hinic_vector_to_irq(void *hwdev, enum hinic_service_type type, int vector)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct irq_alloc_info_st *irq_info;
+ int irq = -EINVAL;
+
+ /* Reject negative vectors: C's % would yield a negative index
+ * (matches the guard in hinic_vector_to_eqn).
+ */
+ if (!hwdev || vector < 0)
+ return -EINVAL;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ if (type != SERVICE_T_ROCE && type != SERVICE_T_IWARP) {
+ sdk_err(dev->dev_hdl,
+ "Service type: %u, only RDMA service could get irq by vector\n",
+ type);
+ return -EINVAL;
+ }
+
+ /* Current RDMA CEQ are 2 - 31, will change in the future */
+ vector = ((vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE);
+
+ irq_info = cfg_mgmt->irq_param_info.alloc_info;
+ if ((irq_info[vector].type == SERVICE_T_ROCE ||
+ irq_info[vector].type == SERVICE_T_IWARP) &&
+ irq_info[vector].free == CFG_BUSY)
+ irq = (int)irq_info[vector].info.irq_id;
+
+ return irq;
+}
+EXPORT_SYMBOL(hinic_vector_to_irq);
+
+/* Reserve up to @num completion event queues for service @type, skipping
+ * the first CFG_RDMA_CEQ_BASE queues.  *@act_num reports how many were
+ * actually reserved; running out mid-way returns 0 with a warning rather
+ * than an error.
+ */
+int hinic_alloc_ceqs(void *hwdev, enum hinic_service_type type, int num,
+ int *ceq_id_array, int *act_num)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_eq_info *eq;
+ int free_ceq;
+ int i, j;
+
+ if (!hwdev || !ceq_id_array || !act_num)
+ return -EINVAL;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ eq = &cfg_mgmt->eq_info;
+ free_ceq = eq->num_ceq_remain;
+
+ mutex_lock(&eq->eq_mutex);
+
+ if (num > free_ceq) {
+ if (free_ceq <= 0) {
+ sdk_err(dev->dev_hdl, "No free ceq resource in cfg mgmt\n");
+ mutex_unlock(&eq->eq_mutex);
+ return -ENOMEM;
+ }
+
+ sdk_warn(dev->dev_hdl, "Only %d ceq resource in cfg mgmt\n",
+ free_ceq);
+ }
+
+ *act_num = 0;
+
+ /* Only queues above the RDMA base are eligible. */
+ num = min(num, eq->num_ceq - CFG_RDMA_CEQ_BASE);
+ for (i = 0; i < num; i++) {
+ if (eq->num_ceq_remain == 0) {
+ sdk_warn(dev->dev_hdl, "Alloc %d ceqs, less than required %d ceqs\n",
+ *act_num, num);
+ mutex_unlock(&eq->eq_mutex);
+ return 0;
+ }
+
+ /* linear scan for the first free eligible queue */
+ for (j = CFG_RDMA_CEQ_BASE; j < eq->num_ceq; j++) {
+ if (eq->eq[j].free == CFG_FREE) {
+ eq->eq[j].type = type;
+ eq->eq[j].free = CFG_BUSY;
+ eq->num_ceq_remain--;
+ ceq_id_array[i] = eq->eq[j].eqn;
+ (*act_num)++;
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&eq->eq_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(hinic_alloc_ceqs);
+
+/* Return ceq @ceq_id (previously reserved for service @type) back to the
+ * free pool.  An id that does not match a busy entry of the given type
+ * only triggers a warning.
+ */
+void hinic_free_ceq(void *hwdev, enum hinic_service_type type, int ceq_id)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct cfg_eq_info *eq;
+ u8 num_ceq;
+ u8 i = 0;
+
+ if (!hwdev)
+ return;
+
+ cfg_mgmt = dev->cfg_mgmt;
+ eq = &cfg_mgmt->eq_info;
+ num_ceq = eq->num_ceq;
+
+ mutex_lock(&eq->eq_mutex);
+
+ for (i = 0; i < num_ceq; i++) {
+ if (ceq_id == eq->eq[i].eqn &&
+ type == cfg_mgmt->eq_info.eq[i].type) {
+ if (eq->eq[i].free == CFG_BUSY) {
+ eq->eq[i].free = CFG_FREE;
+ eq->num_ceq_remain++;
+ /* NOTE(review): wrapping with %= on overflow looks
+ * like damage control; a clamp to num_ceq may be
+ * what was intended — verify.
+ */
+ if (eq->num_ceq_remain > num_ceq)
+ eq->num_ceq_remain %= num_ceq;
+
+ mutex_unlock(&eq->eq_mutex);
+ return;
+ }
+ }
+ }
+
+ /* loop ran to completion: nothing matched or entry already free */
+ if (i >= num_ceq)
+ sdk_warn(dev->dev_hdl, "ceq %d don't need to free\n", ceq_id);
+
+ mutex_unlock(&eq->eq_mutex);
+}
+EXPORT_SYMBOL(hinic_free_ceq);
+
+/* Mailbox handler: a VF asked its PF for capabilities.  The reply is
+ * built from the PF's cached service_cap, exporting only the per-VF
+ * quotas.  In OVS mode the NIC queue numbers are additionally fetched
+ * from firmware for the requesting function, since they are fixed per
+ * function there.
+ */
+static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_dev_cap *dev_cap = buf_out;
+ struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+ struct nic_service_cap *nic_cap = &cap->nic_cap;
+ struct dev_roce_svc_own_cap *roce_cap =
+ &cap->rdma_cap.dev_rdma_cap.roce_own_cap;
+ struct dev_iwarp_svc_own_cap *iwarp_cap =
+ &cap->rdma_cap.dev_rdma_cap.iwarp_own_cap;
+ struct dev_ovs_svc_cap *ovs_cap = &cap->ovs_cap.dev_ovs_cap;
+ struct hinic_dev_cap dev_cap_tmp = {0};
+ u16 out_len = 0;
+ u16 func_id;
+ int err;
+
+ memset(dev_cap, 0, sizeof(*dev_cap));
+
+ /* Pack the soft-function attribute bits */
+ if (cap->sf_svc_attr.ft_en)
+ dev_cap->sf_svc_attr |= SF_SVC_FT_BIT;
+ else
+ dev_cap->sf_svc_attr &= ~SF_SVC_FT_BIT;
+
+ if (cap->sf_svc_attr.rdma_en)
+ dev_cap->sf_svc_attr |= SF_SVC_RDMA_BIT;
+ else
+ dev_cap->sf_svc_attr &= ~SF_SVC_RDMA_BIT;
+
+ dev_cap->sf_en_vf = cap->sf_svc_attr.sf_en_vf;
+
+ /* Topology/identity fields are passed through unchanged */
+ dev_cap->host_id = cap->host_id;
+ dev_cap->ep_id = cap->ep_id;
+ dev_cap->intr_type = cap->interrupt_type;
+ dev_cap->max_cos_id = cap->max_cos_id;
+ dev_cap->er_id = cap->er_id;
+ dev_cap->port_id = cap->port_id;
+ dev_cap->max_vf = cap->max_vf;
+ dev_cap->svc_cap_en = cap->chip_svc_type;
+ dev_cap->host_total_func = cap->host_total_function;
+ dev_cap->host_oq_id_mask_val = cap->host_oq_id_mask_val;
+ dev_cap->net_port_mode = cap->net_port_mode;
+
+ /* Parameters below is uninitialized because NIC and ROCE not use it
+ * max_connect_num
+ * max_stick2cache_num
+ * bfilter_start_addr
+ * bfilter_len
+ * hash_bucket_num
+ * cfg_file_ver
+ */
+
+ /* NIC VF resources */
+ dev_cap->nic_max_sq = nic_cap->vf_max_sqs;
+ dev_cap->nic_max_rq = nic_cap->vf_max_rqs;
+
+ /* ROCE VF resources */
+ dev_cap->roce_max_qp = roce_cap->vf_max_qps;
+ dev_cap->roce_max_cq = roce_cap->vf_max_cqs;
+ dev_cap->roce_max_srq = roce_cap->vf_max_srqs;
+ dev_cap->roce_max_mpt = roce_cap->vf_max_mpts;
+
+ dev_cap->roce_cmtt_cl_start = roce_cap->cmtt_cl_start;
+ dev_cap->roce_cmtt_cl_end = roce_cap->cmtt_cl_end;
+ dev_cap->roce_cmtt_cl_size = roce_cap->cmtt_cl_sz;
+
+ dev_cap->roce_dmtt_cl_start = roce_cap->dmtt_cl_start;
+ dev_cap->roce_dmtt_cl_end = roce_cap->dmtt_cl_end;
+ dev_cap->roce_dmtt_cl_size = roce_cap->dmtt_cl_sz;
+
+ dev_cap->roce_wqe_cl_start = roce_cap->wqe_cl_start;
+ dev_cap->roce_wqe_cl_end = roce_cap->wqe_cl_end;
+ dev_cap->roce_wqe_cl_size = roce_cap->wqe_cl_sz;
+
+ /* Iwarp VF resources */
+ dev_cap->iwarp_max_qp = iwarp_cap->vf_max_qps;
+ dev_cap->iwarp_max_cq = iwarp_cap->vf_max_cqs;
+ dev_cap->iwarp_max_mpt = iwarp_cap->vf_max_mpts;
+
+ /* OVS VF resources */
+ dev_cap->ovs_max_qpc = ovs_cap->max_pctxs;
+ dev_cap->ovs_dq_en = ovs_cap->dynamic_qp_en;
+
+ *out_size = sizeof(*dev_cap);
+
+ if (!IS_OVS_TYPE(dev))
+ return 0;
+
+ out_len = sizeof(dev_cap_tmp);
+ /* fixed qnum in ovs mode */
+ func_id = vf_id + hinic_glb_pf_vf_offset(hwdev);
+ dev_cap_tmp.func_id = func_id;
+ err = hinic_pf_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_FUNC_CAP,
+ &dev_cap_tmp, sizeof(dev_cap_tmp),
+ &dev_cap_tmp, &out_len, 0);
+ if (err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+ err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) {
+ sdk_err(dev->dev_hdl,
+ "Get func_id: %u capability from FW failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+ func_id, err, dev_cap_tmp.status, out_len);
+ return -EFAULT;
+ } else if (err) {
+ /* busy errors are propagated so the VF can retry */
+ return err;
+ }
+
+ /* Firmware reports queue counts minus one */
+ dev_cap->nic_max_sq = dev_cap_tmp.nic_max_sq + 1;
+ dev_cap->nic_max_rq = dev_cap_tmp.nic_max_rq + 1;
+ dev_cap->max_queue_allowed = dev_cap_tmp.max_queue_allowed;
+
+ sdk_info(dev->dev_hdl, "func_id(%u) %s qnum %u max_queue_allowed %u\n",
+ func_id, (ovs_cap->dynamic_qp_en ? "dynamic" : "fixed"),
+ dev_cap->nic_max_sq, dev_cap->max_queue_allowed);
+
+ return 0;
+}
+
+/* Mailbox handler on the PPF: log the incoming cfgmgmt command from
+ * another PF and relay it through the generic PPF mailbox path.
+ */
+static int cfg_mbx_ppf_proc_msg(void *hwdev, u16 pf_id, u16 vf_id, u8 cmd,
+				void *buf_in, u16 in_size, void *buf_out,
+				u16 *out_size)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	sdk_info(dev->dev_hdl, "ppf receive other pf cfgmgmt cmd %d mbox msg\n",
+		 cmd);
+
+	return hinic_ppf_process_mbox_msg(hwdev, pf_id, vf_id, HINIC_MOD_CFGM,
+					  cmd, buf_in, in_size, buf_out,
+					  out_size);
+}
+
+/* Mailbox handler on a VF: cfgmgmt requests are never serviced here. */
+static int cfg_mbx_vf_proc_msg(void *hwdev, u8 cmd, void *buf_in, u16 in_size,
+			       void *buf_out, u16 *out_size)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	*out_size = 0;
+	sdk_err(dev->dev_hdl, "VF msg callback not supported\n");
+
+	return -EOPNOTSUPP;
+}
+
+/* Register the cfgmgmt mailbox callbacks appropriate for this function
+ * type.  A PPF registers both the PPF and PF callbacks; on partial
+ * failure the already-registered one is rolled back.
+ */
+static int cfg_mbx_init(struct hinic_hwdev *dev, struct cfg_mgmt_info *cfg_mgmt)
+{
+	enum func_type type = dev->hwif->attr.func_type;
+	int err;
+
+	switch (type) {
+	case TYPE_PF:
+		err = hinic_register_pf_mbox_cb(dev, HINIC_MOD_CFGM,
+						cfg_mbx_pf_proc_vf_msg);
+		if (err) {
+			sdk_err(dev->dev_hdl,
+				"PF: Register PF mailbox callback failed\n");
+			return err;
+		}
+		break;
+	case TYPE_PPF:
+		err = hinic_register_ppf_mbox_cb(dev, HINIC_MOD_CFGM,
+						 cfg_mbx_ppf_proc_msg);
+		if (err) {
+			sdk_err(dev->dev_hdl,
+				"PPF: Register PPF mailbox callback failed\n");
+			return err;
+		}
+
+		/* A PPF also answers its own VFs like a PF does. */
+		err = hinic_register_pf_mbox_cb(dev, HINIC_MOD_CFGM,
+						cfg_mbx_pf_proc_vf_msg);
+		if (err) {
+			sdk_err(dev->dev_hdl,
+				"PPF: Register PF mailbox callback failed\n");
+			hinic_unregister_ppf_mbox_cb(dev, HINIC_MOD_CFGM);
+			return err;
+		}
+		break;
+	case TYPE_VF:
+		err = hinic_register_vf_mbox_cb(dev, HINIC_MOD_CFGM,
+						cfg_mbx_vf_proc_msg);
+		if (err) {
+			sdk_err(dev->dev_hdl,
+				"VF: Register VF mailbox callback failed\n");
+			return err;
+		}
+		break;
+	default:
+		sdk_err(dev->dev_hdl, "Invalid func_type: %d, not supported\n",
+			type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void cfg_mbx_cleanup(struct hinic_hwdev *dev)
+{
+ hinic_unregister_ppf_mbox_cb(dev, HINIC_MOD_CFGM);
+ hinic_unregister_pf_mbox_cb(dev, HINIC_MOD_CFGM);
+ hinic_unregister_vf_mbox_cb(dev, HINIC_MOD_CFGM);
+}
+
+static int init_cfg_mgmt(struct hinic_hwdev *dev)
+{
+ int err;
+ struct cfg_mgmt_info *cfg_mgmt;
+
+ cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
+ if (!cfg_mgmt)
+ return -ENOMEM;
+
+ dev->cfg_mgmt = cfg_mgmt;
+ cfg_mgmt->hwdev = dev;
+
+ err = cfg_init_eq(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n",
+ err);
+ goto free_mgmt_mem;
+ }
+
+ err = cfg_init_interrupt(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n",
+ err);
+ goto free_eq_mem;
+ }
+
+ err = cfg_enable_interrupt(dev);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n",
+ err);
+ goto free_interrupt_mem;
+ }
+
+ return 0;
+
+free_interrupt_mem:
+ kfree(cfg_mgmt->irq_param_info.alloc_info);
+ cfg_mgmt->irq_param_info.alloc_info = NULL;
+
+free_eq_mem:
+ kfree(cfg_mgmt->eq_info.eq);
+ cfg_mgmt->eq_info.eq = NULL;
+
+free_mgmt_mem:
+ kfree(cfg_mgmt);
+ return err;
+}
+
+static void free_cfg_mgmt(struct hinic_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+
+	/* check whether all allocated resources have been reclaimed */
+ if (cfg_mgmt->irq_param_info.num_irq_remain !=
+ cfg_mgmt->irq_param_info.num_total ||
+ cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq)
+ sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please check\n");
+
+ switch (cfg_mgmt->svc_cap.interrupt_type) {
+ case INTR_TYPE_MSIX:
+ pci_disable_msix(dev->pcidev_hdl);
+ break;
+
+ case INTR_TYPE_MSI:
+ pci_disable_msi(dev->pcidev_hdl);
+ break;
+
+ case INTR_TYPE_INT:
+ default:
+ break;
+ }
+
+ kfree(cfg_mgmt->irq_param_info.alloc_info);
+ cfg_mgmt->irq_param_info.alloc_info = NULL;
+
+ kfree(cfg_mgmt->eq_info.eq);
+ cfg_mgmt->eq_info.eq = NULL;
+
+ kfree(cfg_mgmt);
+}
+
+static int init_capability(struct hinic_hwdev *dev)
+{
+ struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt;
+ int err;
+
+ set_cfg_test_param(cfg_mgmt);
+
+ err = cfg_mbx_init(dev, cfg_mgmt);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Configure mailbox init failed, err: %d\n",
+ err);
+ return err;
+ }
+
+ cfg_mgmt->svc_cap.sf_svc_attr.ft_pf_en = false;
+ cfg_mgmt->svc_cap.sf_svc_attr.rdma_pf_en = false;
+
+ err = get_dev_cap(dev);
+ if (err) {
+ cfg_mbx_cleanup(dev);
+ return err;
+ }
+
+ init_service_param(dev);
+
+ sdk_info(dev->dev_hdl, "Init capability success\n");
+ return 0;
+}
+
+static void free_capability(struct hinic_hwdev *dev)
+{
+ cfg_mbx_cleanup(dev);
+ sdk_info(dev->dev_hdl, "Free capability success");
+}
+
+/* 0 - MSIx, 1 - MSI, 2 - INTx */
+enum intr_type hinic_intr_type(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return INTR_TYPE_NONE;
+
+ return dev->cfg_mgmt->svc_cap.interrupt_type;
+}
+EXPORT_SYMBOL(hinic_intr_type);
+
+bool hinic_support_nic(void *hwdev, struct nic_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_NIC_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_nic);
+
+bool hinic_support_roce(void *hwdev, struct rdma_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_ROCE_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_roce);
+
+bool hinic_support_fcoe(void *hwdev, struct fcoe_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_FCOE_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.fcoe_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_fcoe);
+
+/* Only the PPF supports it; the PF does not */
+bool hinic_support_toe(void *hwdev, struct toe_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_TOE_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_toe);
+
+bool hinic_support_iwarp(void *hwdev, struct rdma_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_IWARP_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_iwarp);
+
+bool hinic_support_fc(void *hwdev, struct fc_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_FC_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_fc);
+
+bool hinic_support_fic(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_FIC_TYPE(dev))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_fic);
+
+bool hinic_support_ovs(void *hwdev, struct ovs_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_OVS_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_ovs);
+
+bool hinic_support_acl(void *hwdev, struct acl_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_ACL_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.acl_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_acl);
+
+bool hinic_support_rdma(void *hwdev, struct rdma_service_cap *cap)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_RDMA_TYPE(dev))
+ return false;
+
+ if (cap)
+ memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap));
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_rdma);
+
+bool hinic_support_ft(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_FT_TYPE(dev))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(hinic_support_ft);
+
+bool hinic_support_dynamic_q(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ return dev->cfg_mgmt->svc_cap.nic_cap.dynamic_qp ? true : false;
+}
+
+bool hinic_func_for_mgmt(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (dev->cfg_mgmt->svc_cap.chip_svc_type >= CFG_SVC_NIC_BIT0)
+ return false;
+ else
+ return true;
+}
+
+bool hinic_func_for_hwpt(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (IS_HWPT_TYPE(dev))
+ return true;
+ else
+ return false;
+}
+
+bool hinic_func_for_pt(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (dev->cfg_mgmt->svc_cap.force_up)
+ return true;
+ else
+ return false;
+}
+
+int cfg_set_func_sf_en(void *hwdev, u32 enbits, u32 enmask)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct nic_misc_func_sf_enbits *func_sf_enbits;
+ u16 out_size = sizeof(*func_sf_enbits);
+ u16 glb_func_idx;
+ u16 api_info_len;
+ int err;
+
+ api_info_len = sizeof(struct nic_misc_func_sf_enbits);
+ func_sf_enbits = kzalloc(api_info_len, GFP_KERNEL);
+ if (!func_sf_enbits) {
+ sdk_err(dev->dev_hdl, "Alloc cfg api info failed\n");
+ return -ENOMEM;
+ }
+
+ err = hinic_global_func_id_get(dev, &glb_func_idx);
+ if (err) {
+ kfree(func_sf_enbits);
+ return err;
+ }
+
+ func_sf_enbits->stateful_enbits = enbits;
+ func_sf_enbits->stateful_enmask = enmask;
+ func_sf_enbits->function_id = glb_func_idx;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_MISC_SET_FUNC_SF_ENBITS,
+ (void *)func_sf_enbits, api_info_len,
+ (void *)func_sf_enbits, &out_size,
+ VSW_UP_CFG_TIMEOUT);
+ if (err || !out_size || func_sf_enbits->status) {
+ sdk_err(dev->dev_hdl,
+ "Failed to set stateful enable, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, func_sf_enbits->status, out_size);
+ kfree(func_sf_enbits);
+ return -EFAULT;
+ }
+
+ kfree(func_sf_enbits);
+ return 0;
+}
+
+int cfg_get_func_sf_en(void *hwdev, u32 *enbits)
+{
+ struct nic_misc_func_sf_enbits *func_sf_enbits;
+ struct hinic_hwdev *dev = hwdev;
+ u16 out_size = sizeof(*func_sf_enbits);
+ u16 glb_func_idx;
+ u16 api_info_len;
+ int err;
+
+ api_info_len = sizeof(struct nic_misc_func_sf_enbits);
+ func_sf_enbits = kzalloc(api_info_len, GFP_KERNEL);
+ if (!func_sf_enbits) {
+ sdk_err(dev->dev_hdl, "Alloc cfg api info failed\n");
+ return -ENOMEM;
+ }
+
+ err = hinic_global_func_id_get(dev, &glb_func_idx);
+ if (err) {
+ kfree(func_sf_enbits);
+ return err;
+ }
+
+ func_sf_enbits->function_id = glb_func_idx;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_MISC_GET_FUNC_SF_ENBITS,
+ (void *)func_sf_enbits, api_info_len,
+ (void *)func_sf_enbits, &out_size,
+ VSW_UP_CFG_TIMEOUT);
+ if (err || !out_size || func_sf_enbits->status) {
+ sdk_err(dev->dev_hdl, "Failed to get stateful enable, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, func_sf_enbits->status, out_size);
+ kfree(func_sf_enbits);
+ return -EFAULT;
+ }
+
+ *enbits = func_sf_enbits->stateful_enbits;
+
+ kfree(func_sf_enbits);
+ return 0;
+}
+
+int hinic_set_toe_enable(void *hwdev, bool enable)
+{
+ u32 enbits;
+ u32 enmask;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ enbits = VSW_SET_STATEFUL_BITS_TOE((u16)enable);
+ enmask = VSW_SET_STATEFUL_BITS_TOE(0x1U);
+
+ return cfg_set_func_sf_en(hwdev, enbits, enmask);
+}
+EXPORT_SYMBOL(hinic_set_toe_enable);
+
+bool hinic_get_toe_enable(void *hwdev)
+{
+ int err;
+ u32 enbits;
+
+ if (!hwdev)
+ return false;
+
+ err = cfg_get_func_sf_en(hwdev, &enbits);
+ if (err)
+ return false;
+
+ return VSW_GET_STATEFUL_BITS_TOE(enbits);
+}
+EXPORT_SYMBOL(hinic_get_toe_enable);
+
+int hinic_set_fcoe_enable(void *hwdev, bool enable)
+{
+ u32 enbits;
+ u32 enmask;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ enbits = VSW_SET_STATEFUL_BITS_FCOE((u16)enable);
+ enmask = VSW_SET_STATEFUL_BITS_FCOE(0x1U);
+
+ return cfg_set_func_sf_en(hwdev, enbits, enmask);
+}
+EXPORT_SYMBOL(hinic_set_fcoe_enable);
+
+bool hinic_get_fcoe_enable(void *hwdev)
+{
+ int err;
+ u32 enbits;
+
+ if (!hwdev)
+ return false;
+
+ err = cfg_get_func_sf_en(hwdev, &enbits);
+ if (err)
+ return false;
+
+ return VSW_GET_STATEFUL_BITS_FCOE(enbits);
+}
+EXPORT_SYMBOL(hinic_get_fcoe_enable);
+
+bool hinic_get_stateful_enable(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ return dev->cfg_mgmt->svc_cap.sf_en;
+}
+EXPORT_SYMBOL(hinic_get_stateful_enable);
+
+u8 hinic_host_oq_id_mask(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting host oq id mask\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val;
+}
+EXPORT_SYMBOL(hinic_host_oq_id_mask);
+
+u8 hinic_host_id(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting host id\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.host_id;
+}
+EXPORT_SYMBOL(hinic_host_id);
+
+u16 hinic_host_total_func(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting host total function number\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.host_total_function;
+}
+EXPORT_SYMBOL(hinic_host_total_func);
+
+u16 hinic_func_max_qnum(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting function max queue number\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.max_sqs;
+}
+EXPORT_SYMBOL(hinic_func_max_qnum);
+
+u16 hinic_func_max_nic_qnum(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting function max queue number\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs;
+}
+EXPORT_SYMBOL(hinic_func_max_nic_qnum);
+
+u8 hinic_ep_id(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting ep id\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.ep_id;
+}
+EXPORT_SYMBOL(hinic_ep_id);
+
+u8 hinic_er_id(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting er id\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.er_id;
+}
+EXPORT_SYMBOL(hinic_er_id);
+
+u8 hinic_physical_port_id(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting physical port id\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.port_id;
+}
+EXPORT_SYMBOL(hinic_physical_port_id);
+
+u8 hinic_func_max_vf(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting max vf number\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.max_vf;
+}
+EXPORT_SYMBOL(hinic_func_max_vf);
+
+u8 hinic_max_num_cos(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting max cos number\n");
+ return 0;
+ }
+ return (u8)(dev->cfg_mgmt->svc_cap.max_cos_id + 1);
+}
+EXPORT_SYMBOL(hinic_max_num_cos);
+
+u8 hinic_cos_valid_bitmap(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting cos valid bitmap\n");
+ return 0;
+ }
+ return (u8)(dev->cfg_mgmt->svc_cap.cos_valid_bitmap);
+}
+EXPORT_SYMBOL(hinic_cos_valid_bitmap);
+
+u8 hinic_net_port_mode(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting net port mode\n");
+ return 0;
+ }
+ return dev->cfg_mgmt->svc_cap.net_port_mode;
+}
+EXPORT_SYMBOL(hinic_net_port_mode);
+
+bool hinic_is_hwdev_mod_inited(void *hwdev, enum hinic_hwdev_init_state state)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev || state >= HINIC_HWDEV_MAX_INVAL_INITED)
+ return false;
+
+ return !!test_bit(state, &dev->func_state);
+}
+
+static int hinic_os_dep_init(struct hinic_hwdev *hwdev)
+{
+ hwdev->workq = create_singlethread_workqueue(HINIC_HW_WQ_NAME);
+ if (!hwdev->workq) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize hardware workqueue\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void hinic_os_dep_deinit(struct hinic_hwdev *hwdev)
+{
+ destroy_workqueue(hwdev->workq);
+}
+
+void hinic_ppf_hwdev_unreg(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return;
+
+ down(&dev->ppf_sem);
+ dev->ppf_hwdev = NULL;
+ up(&dev->ppf_sem);
+
+ sdk_info(dev->dev_hdl, "Unregister PPF hwdev\n");
+}
+
+void hinic_ppf_hwdev_reg(void *hwdev, void *ppf_hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return;
+
+ down(&dev->ppf_sem);
+ dev->ppf_hwdev = ppf_hwdev;
+ up(&dev->ppf_sem);
+
+ sdk_info(dev->dev_hdl, "Register PPF hwdev\n");
+}
+
+static int __vf_func_init(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic_vf_mbox_random_id_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init vf mbox random id\n");
+ return err;
+ }
+ err = hinic_vf_func_init(hwdev);
+ if (err)
+ nic_err(hwdev->dev_hdl, "Failed to init nic mbox\n");
+
+ return err;
+}
+
+static int __hilink_phy_init(struct hinic_hwdev *hwdev)
+{
+ int err;
+
+ if (!HINIC_IS_VF(hwdev)) {
+ err = hinic_phy_init_status_judge(hwdev);
+ if (err) {
+ sdk_info(hwdev->dev_hdl, "Phy init failed\n");
+ return err;
+ }
+
+ if (hinic_support_nic(hwdev, NULL))
+ hinic_hilink_info_show(hwdev);
+ }
+
+ return 0;
+}
+
+/* Return:
+ * 0: all success
+ * >0: partial success
+ * <0: all failed
+ */
+int hinic_init_hwdev(struct hinic_init_para *para)
+{
+ struct hinic_hwdev *hwdev;
+ int err;
+
+ if (!(*para->hwdev)) {
+ hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+ if (!hwdev)
+ return -ENOMEM;
+
+ *para->hwdev = hwdev;
+ hwdev->adapter_hdl = para->adapter_hdl;
+ hwdev->pcidev_hdl = para->pcidev_hdl;
+ hwdev->dev_hdl = para->dev_hdl;
+ hwdev->chip_node = para->chip_node;
+ hwdev->ppf_hwdev = para->ppf_hwdev;
+ sema_init(&hwdev->ppf_sem, 1);
+ sema_init(&hwdev->func_sem, 1);
+ hwdev->func_ref = 0;
+
+ hwdev->chip_fault_stats = vzalloc(HINIC_CHIP_FAULT_SIZE);
+ if (!hwdev->chip_fault_stats)
+ goto alloc_chip_fault_stats_err;
+
+ err = hinic_init_hwif(hwdev, para->cfg_reg_base,
+ para->intr_reg_base,
+ para->db_base_phy, para->db_base,
+ para->dwqe_mapping);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init hwif\n");
+ goto init_hwif_err;
+ }
+ } else {
+ hwdev = *para->hwdev;
+ }
+
+ /* detect slave host according to BAR reg */
+ detect_host_mode_pre(hwdev);
+
+ if (IS_BMGW_SLAVE_HOST(hwdev) &&
+ (!hinic_get_master_host_mbox_enable(hwdev))) {
+ set_bit(HINIC_HWDEV_NONE_INITED, &hwdev->func_state);
+ sdk_info(hwdev->dev_hdl, "Master host not ready, init hwdev later\n");
+ return (1 << HINIC_HWDEV_ALL_INITED);
+ }
+
+ err = hinic_os_dep_init(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init os dependent\n");
+ goto os_dep_init_err;
+ }
+
+ hinic_set_chip_present(hwdev);
+ hinic_init_heartbeat(hwdev);
+
+ err = init_cfg_mgmt(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n");
+ goto init_cfg_mgmt_err;
+ }
+
+ err = hinic_init_comm_ch(hwdev);
+ if (err) {
+ if (!(hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK)) {
+ sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n");
+ goto init_comm_ch_err;
+ } else {
+ sdk_err(hwdev->dev_hdl, "Init communication channel partitail failed\n");
+ return hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK;
+ }
+ }
+
+ err = init_capability(hwdev);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to init capability\n");
+ goto init_cap_err;
+ }
+
+ if (hwdev->cfg_mgmt->svc_cap.force_up)
+ hwdev->feature_cap |= HINIC_FUNC_FORCE_LINK_UP;
+
+ err = __vf_func_init(hwdev);
+ if (err)
+ goto vf_func_init_err;
+
+ err = __hilink_phy_init(hwdev);
+ if (err)
+ goto hilink_phy_init_err;
+
+ set_bit(HINIC_HWDEV_ALL_INITED, &hwdev->func_state);
+
+ sdk_info(hwdev->dev_hdl, "Init hwdev success\n");
+
+ return 0;
+
+hilink_phy_init_err:
+
+ hinic_vf_func_free(hwdev);
+vf_func_init_err:
+ free_capability(hwdev);
+init_cap_err:
+ return (hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK);
+
+init_comm_ch_err:
+ free_cfg_mgmt(hwdev);
+
+init_cfg_mgmt_err:
+ hinic_destroy_heartbeat(hwdev);
+ hinic_os_dep_deinit(hwdev);
+
+os_dep_init_err:
+ hinic_free_hwif(hwdev);
+
+init_hwif_err:
+ vfree(hwdev->chip_fault_stats);
+
+alloc_chip_fault_stats_err:
+ kfree(hwdev);
+ *para->hwdev = NULL;
+
+ return -EFAULT;
+}
+
+/**
+ * hinic_set_vf_dev_cap - Set max queue num for VF
+ * @hwdev: the HW device for VF
+ */
+int hinic_set_vf_dev_cap(void *hwdev)
+{
+ int err;
+ struct hinic_hwdev *dev;
+ enum func_type type;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ dev = (struct hinic_hwdev *)hwdev;
+ type = HINIC_FUNC_TYPE(dev);
+ if (type != TYPE_VF)
+ return -EPERM;
+
+ err = get_dev_cap(dev);
+ if (err)
+ return err;
+
+ nic_param_fix(dev);
+
+ return 0;
+}
+
+void hinic_free_hwdev(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ enum hinic_hwdev_init_state state = HINIC_HWDEV_ALL_INITED;
+ int flag = 0;
+
+ if (!hwdev)
+ return;
+
+ if (test_bit(HINIC_HWDEV_ALL_INITED, &dev->func_state)) {
+ clear_bit(HINIC_HWDEV_ALL_INITED, &dev->func_state);
+
+ /* BM slave function not need to exec rx_tx_flush */
+ if (dev->func_mode != FUNC_MOD_MULTI_BM_SLAVE)
+ hinic_func_rx_tx_flush(hwdev);
+
+ hinic_vf_func_free(hwdev);
+
+ free_capability(dev);
+ }
+ while (state > HINIC_HWDEV_NONE_INITED) {
+ if (test_bit(state, &dev->func_state)) {
+ flag = 1;
+ break;
+ }
+ state--;
+ }
+ if (flag) {
+ hinic_uninit_comm_ch(dev);
+ free_cfg_mgmt(dev);
+ hinic_destroy_heartbeat(dev);
+ hinic_os_dep_deinit(dev);
+ }
+ clear_bit(HINIC_HWDEV_NONE_INITED, &dev->func_state);
+ hinic_free_hwif(dev);
+ vfree(dev->chip_fault_stats);
+ kfree(dev);
+}
+
+void hinic_set_api_stop(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return;
+
+ dev->chip_present_flag = HINIC_CHIP_ABSENT;
+ sdk_info(dev->dev_hdl, "Set card absent\n");
+ hinic_force_complete_all(dev);
+ sdk_info(dev->dev_hdl, "All messages interacting with the chip will stop\n");
+}
+
+void hinic_shutdown_hwdev(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return;
+
+ if (IS_SLAVE_HOST(dev))
+ set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false);
+}
+
+u32 hinic_func_pf_num(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting pf number capability\n");
+ return 0;
+ }
+
+ return dev->cfg_mgmt->svc_cap.pf_num;
+}
+
+u64 hinic_get_func_feature_cap(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting function feature capability\n");
+ return 0;
+ }
+
+ return dev->feature_cap;
+}
+
+enum hinic_func_mode hinic_get_func_mode(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting function mode\n");
+ return 0;
+ }
+
+ return dev->func_mode;
+}
+EXPORT_SYMBOL(hinic_get_func_mode);
+
+enum hinic_service_mode hinic_get_service_mode(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for getting service mode\n");
+ return HINIC_WORK_MODE_INVALID;
+ }
+
+ return dev->board_info.service_mode;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cfg.h b/drivers/net/ethernet/huawei/hinic/hinic_cfg.h
new file mode 100644
index 000000000000..28b9c0c4b93e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cfg.h
@@ -0,0 +1,526 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __CFG_MGT_H__
+#define __CFG_MGT_H__
+
+#include "hinic_ctx_def.h"
+
+enum {
+ CFG_FREE = 0,
+ CFG_BUSY = 1
+};
+
+/* start position for CEQs allocation, Max number of CEQs is 32 */
+/*lint -save -e849*/
+enum {
+ CFG_RDMA_CEQ_BASE = 0
+};
+
+/*lint -restore*/
+enum {
+ CFG_NET_MODE_ETH = 0, /* Eth */
+ CFG_NET_MODE_FIC = 1, /* FIC */
+ CFG_NET_MODE_FC = 2 /* FC */
+};
+
+enum {
+ SF_SVC_FT_BIT = (1 << 0),
+ SF_SVC_RDMA_BIT = (1 << 1),
+};
+
+/* RDMA resource */
+#define K_UNIT BIT(10)
+#define M_UNIT BIT(20)
+#define G_UNIT BIT(30)
+
+/* number of PFs and VFs */
+#define HOST_PF_NUM 4
+#define HOST_VF_NUM 0
+#define HOST_OQID_MASK_VAL 2
+
+/* L2NIC */
+#define L2NIC_SQ_DEPTH (4 * K_UNIT)
+#define L2NIC_RQ_DEPTH (4 * K_UNIT)
+
+#define HINIC_CFG_MAX_QP 128
+
+/* RDMA */
+#define RDMA_RSVD_QPS 2
+#define ROCE_MAX_WQES (16 * K_UNIT - 1)
+#define IWARP_MAX_WQES (8 * K_UNIT)
+
+#define RDMA_MAX_SQ_SGE 8
+
+#define ROCE_MAX_RQ_SGE 8
+#define IWARP_MAX_RQ_SGE 2
+
+#define RDMA_MAX_SQ_DESC_SZ (1 * K_UNIT)
+
+/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 64B(max_task_seg_len)) */
+#define ROCE_MAX_SQ_INLINE_DATA_SZ 192
+
+#define IWARP_MAX_SQ_INLINE_DATA_SZ 108
+
+#define ROCE_MAX_RQ_DESC_SZ 128
+#define IWARP_MAX_RQ_DESC_SZ 64
+
+#define IWARP_MAX_IRQ_DEPTH 1024
+#define IWARP_IRQ_ENTRY_SZ 64
+
+#define IWARP_MAX_ORQ_DEPTH 1024
+#define IWARP_ORQ_ENTRY_SZ 32
+
+#define IWARP_MAX_RTOQ_DEPTH 1024
+#define IWARP_RTOQ_ENTRY_SZ 32
+
+#define IWARP_MAX_ACKQ_DEPTH 1024
+#define IWARP_ACKQ_ENTRY_SZ 16
+
+#define ROCE_QPC_ENTRY_SZ 512
+#define IWARP_QPC_ENTRY_SZ 1024
+
+#define WQEBB_SZ 64
+
+#define ROCE_RDMARC_ENTRY_SZ 32
+#define ROCE_MAX_QP_INIT_RDMA 128
+#define ROCE_MAX_QP_DEST_RDMA 128
+
+#define ROCE_MAX_SRQ_WQES (16 * K_UNIT - 1)
+#define ROCE_RSVD_SRQS 0
+#define ROCE_MAX_SRQ_SGE 7
+#define ROCE_SRQC_ENTERY_SZ 64
+
+#define RDMA_MAX_CQES (64 * K_UNIT - 1)
+#define RDMA_RSVD_CQS 0
+
+#define RDMA_CQC_ENTRY_SZ 128
+
+#define RDMA_CQE_SZ 32
+#define RDMA_RSVD_MRWS 128
+#define RDMA_MPT_ENTRY_SZ 64
+#define RDMA_NUM_MTTS (1 * G_UNIT)
+#define LOG_MTT_SEG 5
+#define MTT_ENTRY_SZ 8
+#define LOG_RDMARC_SEG 3
+
+#define LOCAL_ACK_DELAY 15
+#define RDMA_NUM_PORTS 1
+#define ROCE_MAX_MSG_SZ (2 * G_UNIT)
+#define IWARP_MAX_MSG_SZ (1 * G_UNIT)
+
+#define DB_PAGE_SZ (4 * K_UNIT)
+#define DWQE_SZ 256
+
+#define NUM_PD (128 * K_UNIT)
+#define RSVD_PD 0
+
+#define MAX_XRCDS (64 * K_UNIT)
+#define RSVD_XRCDS 0
+
+#define MAX_GID_PER_PORT 16
+#define GID_ENTRY_SZ 32
+#define RSVD_LKEY ((RDMA_RSVD_MRWS - 1) << 8)
+#define NUM_COMP_VECTORS 32
+#define PAGE_SZ_CAP ((1UL << 12) | (1UL << 13) | (1UL << 14) | \
+ (1UL << 16) | (1UL << 18) | (1UL << 20) | \
+ (1UL << 22))
+#define ROCE_MODE 1
+
+#define MAX_FRPL_LEN 511
+#define MAX_PKEYS 1
+
+/* FCoE */
+#define FCOE_PCTX_SZ 256
+#define FCOE_CCTX_SZ 256
+#define FCOE_SQE_SZ 128
+#define FCOE_SCQC_SZ 64
+#define FCOE_SCQE_SZ 64
+#define FCOE_SRQC_SZ 64
+#define FCOE_SRQE_SZ 32
+
+/* ToE */
+#define TOE_PCTX_SZ 1024
+#define TOE_CQC_SZ 64
+
+/* IoE */
+#define IOE_PCTX_SZ 512
+
+/* FC */
+#define FC_PCTX_SZ 256
+#define FC_CCTX_SZ 256
+#define FC_SQE_SZ 128
+#define FC_SCQC_SZ 64
+#define FC_SCQE_SZ 64
+#define FC_SRQC_SZ 64
+#define FC_SRQE_SZ 32
+
+/* OVS */
+#define OVS_PCTX_SZ 256
+#define OVS_SCQC_SZ 64
+
+/* ACL */
+#define ACL_PCTX_SZ 512
+#define ACL_SCQC_SZ 64
+
+struct dev_sf_svc_attr {
+	bool ft_en;     /* business enable flag (does not include RDMA) */
+ bool ft_pf_en; /* In FPGA Test VF resource is in PF or not,
+ * 0 - VF, 1 - PF, VF doesn't need this bit.
+ */
+ bool rdma_en;
+ bool rdma_pf_en;/* In FPGA Test VF RDMA resource is in PF or not,
+ * 0 - VF, 1 - PF, VF doesn't need this bit.
+ */
+ u8 sf_en_vf; /* SF_EN for PPF/PF's VF */
+};
+
+struct host_shared_resource_cap {
+ u32 host_pctxs; /* Parent Context max 1M, IOE and FCoE max 8K flows */
+ u32 host_cctxs; /* Child Context: max 8K */
+ u32 host_scqs; /* shared CQ, chip interface module uses 1 SCQ
+ * TOE/IOE/FCoE each uses 1 SCQ
+ * RoCE/IWARP uses multiple SCQs
+ * So 6 SCQ least
+ */
+ u32 host_srqs; /* SRQ number: 256K */
+ u32 host_mpts; /* MR number:1M */
+};
+
+/* device capability */
+struct service_cap {
+ struct dev_sf_svc_attr sf_svc_attr;
+ enum cfg_svc_type_en svc_type; /* user input service type */
+ enum cfg_svc_type_en chip_svc_type; /* HW supported service type */
+
+ /* Host global resources */
+ u16 host_total_function;
+ u8 host_oq_id_mask_val;
+ u8 host_id;
+ u8 ep_id;
+ /* DO NOT get interrupt_type from firmware */
+ enum intr_type interrupt_type;
+ u8 intr_chip_en;
+ u8 max_cos_id; /* PF/VF's max cos id */
+ u8 cos_valid_bitmap;
+ u8 er_id; /* PF/VF's ER */
+ u8 port_id; /* PF/VF's physical port */
+ u8 max_vf; /* max VF number that PF supported */
+ u8 force_up;
+ bool sf_en; /* stateful business status */
+ u8 timer_en; /* 0:disable, 1:enable */
+ u8 bloomfilter_en; /* 0:disable, 1:enable*/
+ u16 max_sqs;
+ u16 max_rqs;
+
+ /* For test */
+ u32 test_qpc_num;
+ u32 test_qpc_resvd_num;
+ u32 test_page_size_reorder;
+ bool test_xid_alloc_mode;
+ bool test_gpa_check_enable;
+ u8 test_qpc_alloc_mode;
+ u8 test_scqc_alloc_mode;
+
+ u32 test_max_conn_num;
+ u32 test_max_cache_conn_num;
+ u32 test_scqc_num;
+ u32 test_mpt_num;
+ u32 test_scq_resvd_num;
+ u32 test_mpt_recvd_num;
+ u32 test_hash_num;
+ u32 test_reorder_num;
+
+ u32 max_connect_num; /* PF/VF maximum connection number(1M) */
+ /* The maximum connections which can be stick to cache memory, max 1K */
+ u16 max_stick2cache_num;
+ /* Starting address in cache memory for bloom filter, 64Bytes aligned */
+ u16 bfilter_start_addr;
+ /* Length for bloom filter, aligned on 64Bytes. The size is length*64B.
+ * Bloom filter memory size + 1 must be power of 2.
+ * The maximum memory size of bloom filter is 4M
+ */
+ u16 bfilter_len;
+ /* The size of hash bucket tables, align on 64 entries.
+ * Be used to AND (&) the hash value. Bucket Size +1 must be power of 2.
+ * The maximum number of hash bucket is 4M
+ */
+ u16 hash_bucket_num;
+ u8 net_port_mode; /* 0:ETH,1:FIC,2:4FC */
+
+ u32 pf_num;
+ u32 pf_id_start;
+ u32 vf_num; /* max numbers of vf in current host */
+ u32 vf_id_start;
+
+ struct host_shared_resource_cap shared_res_cap; /* shared capability */
+ struct dev_version_info dev_ver_info; /* version */
+ struct nic_service_cap nic_cap; /* NIC capability */
+ struct rdma_service_cap rdma_cap; /* RDMA capability */
+ struct fcoe_service_cap fcoe_cap; /* FCoE capability */
+ struct toe_service_cap toe_cap; /* ToE capability */
+ struct fc_service_cap fc_cap; /* FC capability */
+ struct ovs_service_cap ovs_cap; /* OVS capability */
+ struct acl_service_cap acl_cap; /* ACL capability */
+};
+
+struct cfg_eq {
+ enum hinic_service_type type;
+ int eqn;
+	int free;               /* 1 - allocated, 0 - freed */
+};
+
+struct cfg_eq_info {
+ struct cfg_eq *eq;
+
+ u8 num_ceq;
+ u8 num_ceq_remain;
+
+	/* mutex used for allocating EQs */
+ struct mutex eq_mutex;
+};
+
+struct irq_alloc_info_st {
+ enum hinic_service_type type;
+	int free;               /* 1 - allocated, 0 - freed */
+ struct irq_info info;
+};
+
+struct cfg_irq_info {
+ struct irq_alloc_info_st *alloc_info;
+ u16 num_total;
+ u16 num_irq_remain;
+ u16 num_irq_hw; /* device max irq number */
+
+	/* mutex used for allocating IRQs */
+ struct mutex irq_mutex;
+};
+
+#define VECTOR_THRESHOLD 2
+
+struct cfg_mgmt_info {
+ struct hinic_hwdev *hwdev;
+ struct service_cap svc_cap;
+ struct cfg_eq_info eq_info; /* EQ */
+ struct cfg_irq_info irq_param_info; /* IRQ */
+ u32 func_seq_num; /* temporary */
+};
+
+enum cfg_sub_cmd {
+ /* PPF(PF) <-> FW */
+ HINIC_CFG_NIC_CAP = 0,
+ CFG_FW_VERSION,
+ CFG_UCODE_VERSION,
+ HINIC_CFG_FUNC_CAP,
+ HINIC_CFG_MBOX_CAP = 6,
+};
+
+struct hinic_dev_cap {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ /* Public resource */
+ u8 sf_svc_attr;
+ u8 host_id;
+ u8 sf_en_pf;
+ u8 sf_en_vf;
+
+ u8 ep_id;
+ u8 intr_type;
+ u8 max_cos_id;
+ u8 er_id;
+ u8 port_id;
+ u8 max_vf;
+ u16 svc_cap_en;
+ u16 host_total_func;
+ u8 host_oq_id_mask_val;
+ u8 max_vf_cos_id;
+
+ u32 max_conn_num;
+ u16 max_stick2cache_num;
+ u16 max_bfilter_start_addr;
+ u16 bfilter_len;
+ u16 hash_bucket_num;
+ u8 cfg_file_ver;
+ u8 net_port_mode;
+ u8 valid_cos_bitmap; /* every bit indicate cos is valid */
+ u8 force_up;
+ u32 pf_num;
+ u32 pf_id_start;
+ u32 vf_num;
+ u32 vf_id_start;
+
+ /* shared resource */
+ u32 host_pctx_num;
+ u8 host_sf_en;
+ u8 rsvd2[3];
+ u32 host_ccxt_num;
+ u32 host_scq_num;
+ u32 host_srq_num;
+ u32 host_mpt_num;
+
+ /* l2nic */
+ u16 nic_max_sq;
+ u16 nic_max_rq;
+ u16 nic_vf_max_sq;
+ u16 nic_vf_max_rq;
+ u8 nic_lro_en;
+ u8 nic_lro_sz;
+ u8 nic_tso_sz;
+ u8 max_queue_allowed;
+
+ /* RoCE */
+ u32 roce_max_qp;
+ u32 roce_max_cq;
+ u32 roce_max_srq;
+ u32 roce_max_mpt;
+
+ u32 roce_vf_max_qp;
+ u32 roce_vf_max_cq;
+ u32 roce_vf_max_srq;
+ u32 roce_vf_max_mpt;
+
+ u32 roce_cmtt_cl_start;
+ u32 roce_cmtt_cl_end;
+ u32 roce_cmtt_cl_size;
+
+ u32 roce_dmtt_cl_start;
+ u32 roce_dmtt_cl_end;
+ u32 roce_dmtt_cl_size;
+
+ u32 roce_wqe_cl_start;
+ u32 roce_wqe_cl_end;
+ u32 roce_wqe_cl_size;
+
+ /* IWARP */
+ u32 iwarp_max_qp;
+ u32 iwarp_max_cq;
+ u32 iwarp_max_mpt;
+
+ u32 iwarp_vf_max_qp;
+ u32 iwarp_vf_max_cq;
+ u32 iwarp_vf_max_mpt;
+
+ u32 iwarp_cmtt_cl_start;
+ u32 iwarp_cmtt_cl_end;
+ u32 iwarp_cmtt_cl_size;
+
+ u32 iwarp_dmtt_cl_start;
+ u32 iwarp_dmtt_cl_end;
+ u32 iwarp_dmtt_cl_size;
+
+ u32 iwarp_wqe_cl_start;
+ u32 iwarp_wqe_cl_end;
+ u32 iwarp_wqe_cl_size;
+
+ /* FCoE */
+ u32 fcoe_max_qp;
+ u32 fcoe_max_cq;
+ u32 fcoe_max_srq;
+
+ u32 fcoe_max_cctx;
+ u32 fcoe_cctx_id_start;
+
+ u8 fcoe_vp_id_start;
+ u8 fcoe_vp_id_end;
+ u8 rsvd4[2];
+
+ /* OVS */
+ u32 ovs_max_qpc;
+ u8 ovs_dq_en;
+ u8 rsvd5[3];
+
+ /* ToE */
+ u32 toe_max_pctx;
+ u32 toe_max_cq;
+ u32 toe_max_srq;
+ u32 toe_srq_id_start;
+
+ /* FC */
+ u32 fc_max_pctx;
+ u32 fc_max_scq;
+ u32 fc_max_srq;
+
+ u32 fc_max_cctx;
+ u32 fc_cctx_id_start;
+
+ u8 fc_vp_id_start;
+ u8 fc_vp_id_end;
+ u16 func_id;
+};
+
+#define VSW_UP_CFG_TIMEOUT (0xFF00000)
+
+#define VSW_SET_STATEFUL_BITS_TOE(flag) \
+ ((flag) << VSW_STATEFUL_TOE_EN)
+#define VSW_SET_STATEFUL_BITS_FCOE(flag) \
+ ((flag) << VSW_STATEFUL_FCOE_EN)
+#define VSW_SET_STATEFUL_BITS_IWARP(flag) \
+ ((flag) << VSW_STATEFUL_IWARP_EN)
+#define VSW_SET_STATEFUL_BITS_ROCE(flag) \
+ ((flag) << VSW_STATEFUL_ROCE_EN)
+
+#define VSW_GET_STATEFUL_BITS_TOE(flag) \
+ ((bool)(((flag) >> VSW_STATEFUL_TOE_EN) & 0x1U))
+#define VSW_GET_STATEFUL_BITS_FCOE(flag) \
+ ((bool)(((flag) >> VSW_STATEFUL_FCOE_EN) & 0x1U))
+#define VSW_GET_STATEFUL_BITS_IWARP(flag) \
+ ((bool)(((flag) >> VSW_STATEFUL_IWARP_EN) & 0x1U))
+#define VSW_GET_STATEFUL_BITS_ROCE(flag) \
+ ((bool)(((flag) >> VSW_STATEFUL_ROCE_EN) & 0x1U))
+
+enum tag_vsw_major_cmd {
+ VSW_MAJOR_MISC = 10, /* 0~9 reserved for driver */
+ VSW_MAJOR_L2SWITCH,
+ VSW_MAJOR_L2MULTICAST,
+ VSW_MAJOR_QOS,
+ VSW_MAJOR_PKTSUPS,
+ VSW_MAJOR_VLANFILTER,
+ VSW_MAJOR_MACFILTER,
+ VSW_MAJOR_IPFILTER,
+ VSW_MAJOR_VLANMAPPING,
+ VSW_MAJOR_ETHTRUNK,
+ VSW_MAJOR_MIRROR,
+ VSW_MAJOR_DFX,
+ VSW_MAJOR_ACL,
+};
+
+enum tag_vsw_minor_misc_cmd {
+ VSW_MINOR_MISC_INIT_FUNC = 0,
+ VSW_MINOR_MISC_SET_FUNC_SF_ENBITS,
+ VSW_MINOR_MISC_GET_FUNC_SF_ENBITS,
+ VSW_MINOR_MISC_CMD_MAX,
+};
+
+/* vswitch eth-trunk sub-command */
+enum tag_nic_stateful_enbits {
+ VSW_STATEFUL_TOE_EN = 0,
+ VSW_STATEFUL_FCOE_EN = 1,
+ VSW_STATEFUL_IWARP_EN = 2,
+ VSW_STATEFUL_ROCE_EN = 3,
+};
+
+/* function stateful enable parameters */
+struct nic_misc_func_sf_enbits {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+ u32 function_id;
+ u32 stateful_enbits; /* b0:toe, b1:fcoe, b2:iwarp, b3:roce */
+ u32 stateful_enmask; /* b0:toe, b1:fcoe, b2:iwarp, b3:roce */
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c
new file mode 100644
index 000000000000..277be1c2b84e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c
@@ -0,0 +1,1575 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_hwif.h"
+#include "hinic_nic_io.h"
+#include "hinic_eqs.h"
+#include "hinic_wq.h"
+#include "hinic_cmdq.h"
+
+#define CMDQ_CMD_TIMEOUT 5000 /* millisecond */
+#define CMDQ_CMD_RETRY_TIMEOUT 1000 /* millisecond */
+
+#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
+#define LOWER_8_BITS(data) ((data) & 0xFF)
+
+/* Doorbell db_info field layout; assembled in cmdq_fill_db(). */
+#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
+#define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23
+#define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
+#define CMDQ_DB_INFO_SRC_TYPE_SHIFT 27
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU
+#define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U
+#define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U
+#define CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU
+
+#define CMDQ_DB_INFO_SET(val, member) \
+ (((val) & CMDQ_DB_INFO_##member##_MASK) \
+ << CMDQ_DB_INFO_##member##_SHIFT)
+
+/* WQE ctrl_info field layout; written in cmdq_prepare_wqe_ctrl(). */
+#define CMDQ_CTRL_PI_SHIFT 0
+#define CMDQ_CTRL_CMD_SHIFT 16
+#define CMDQ_CTRL_MOD_SHIFT 24
+#define CMDQ_CTRL_ACK_TYPE_SHIFT 29
+#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
+
+#define CMDQ_CTRL_PI_MASK 0xFFFFU
+#define CMDQ_CTRL_CMD_MASK 0xFFU
+#define CMDQ_CTRL_MOD_MASK 0x1FU
+#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U
+#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U
+
+#define CMDQ_CTRL_SET(val, member) \
+ (((val) & CMDQ_CTRL_##member##_MASK) \
+ << CMDQ_CTRL_##member##_SHIFT)
+
+#define CMDQ_CTRL_GET(val, member) \
+ (((val) >> CMDQ_CTRL_##member##_SHIFT) \
+ & CMDQ_CTRL_##member##_MASK)
+
+/* WQE header_info field layout (buffer descriptor / completion format). */
+#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
+#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
+#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
+#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
+#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
+#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
+#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31
+
+#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU
+#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U
+#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U
+#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U
+#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U
+#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U
+#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U
+
+#define CMDQ_WQE_HEADER_SET(val, member) \
+ (((val) & CMDQ_WQE_HEADER_##member##_MASK) \
+ << CMDQ_WQE_HEADER_##member##_SHIFT)
+
+#define CMDQ_WQE_HEADER_GET(val, member) \
+ (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) \
+ & CMDQ_WQE_HEADER_##member##_MASK)
+
+/* CMDQ context "page info" 64-bit word layout (current WQE page). */
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
+#define CMDQ_CTXT_EQ_ID_SHIFT 56
+#define CMDQ_CTXT_CEQ_ARM_SHIFT 61
+#define CMDQ_CTXT_CEQ_EN_SHIFT 62
+#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63
+
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
+#define CMDQ_CTXT_EQ_ID_MASK 0x1F
+#define CMDQ_CTXT_CEQ_ARM_MASK 0x1
+#define CMDQ_CTXT_CEQ_EN_MASK 0x1
+#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1
+
+#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \
+ (((u64)(val) & CMDQ_CTXT_##member##_MASK) \
+ << CMDQ_CTXT_##member##_SHIFT)
+
+#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \
+ (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \
+ & CMDQ_CTXT_##member##_MASK)
+
+/* CMDQ context "block info" 64-bit word layout (WQ block + consumer idx). */
+#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
+#define CMDQ_CTXT_CI_SHIFT 52
+
+#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
+#define CMDQ_CTXT_CI_MASK 0xFFF
+
+#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
+ (((u64)(val) & CMDQ_CTXT_##member##_MASK) \
+ << CMDQ_CTXT_##member##_SHIFT)
+
+#define CMDQ_CTXT_BLOCK_INFO_GET(val, member) \
+ (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \
+ & CMDQ_CTXT_##member##_MASK)
+
+/* ARM flag stored in the WQE header saved_data word. */
+#define SAVED_DATA_ARM_SHIFT 31
+
+#define SAVED_DATA_ARM_MASK 0x1U
+
+#define SAVED_DATA_SET(val, member) \
+ (((val) & SAVED_DATA_##member##_MASK) \
+ << SAVED_DATA_##member##_SHIFT)
+
+#define SAVED_DATA_CLEAR(val, member) \
+ ((val) & (~(SAVED_DATA_##member##_MASK \
+ << SAVED_DATA_##member##_SHIFT)))
+
+/* Error code carried in the WQE status word. */
+#define WQE_ERRCODE_VAL_SHIFT 20
+
+#define WQE_ERRCODE_VAL_MASK 0xF
+
+#define WQE_ERRCODE_GET(val, member) \
+ (((val) >> WQE_ERRCODE_##member##_SHIFT) & \
+ WQE_ERRCODE_##member##_MASK)
+
+/* CMDQ type encoded in CEQ event data (see hinic_cmdq_ceq_handler()). */
+#define CEQE_CMDQ_TYPE_SHIFT 0
+
+#define CEQE_CMDQ_TYPE_MASK 0x7
+
+#define CEQE_CMDQ_GET(val, member) \
+ (((val) >> CEQE_CMDQ_##member##_SHIFT) & CEQE_CMDQ_##member##_MASK)
+
+/* HW busy bit doubles as the "wrapped"/completed marker. */
+#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
+
+#define WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
+
+/* Low 8 bits of the producer index select the doorbell offset (x8). */
+#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
+
+#define CMDQ_DB_ADDR(db_base, pi) \
+ (((u8 *)(db_base) + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi))
+
+#define CMDQ_PFN_SHIFT 12
+#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT)
+
+/* The first 8 bytes of a WQE hold the HW busy bit and must be written
+ * last; see cmdq_wqe_fill().
+ */
+#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
+
+#define WQE_LCMD_SIZE 64
+#define WQE_SCMD_SIZE 64
+
+#define COMPLETE_LEN 3
+
+#define CMDQ_WQEBB_SIZE 64
+#define CMDQ_WQE_SIZE 64
+
+#define CMDQ_WQ_PAGE_SIZE 4096
+
+#define WQE_NUM_WQEBBS(wqe_size, wq) \
+ ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))
+
+/* Recover the owning hinic_cmdqs from a cmdq pointer: cmdq_type is the
+ * index of this cmdq inside the cmdq[] array, so subtracting it yields
+ * the array base before container_of().
+ */
+#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
+ struct hinic_cmdqs, cmdq[0])
+
+/* cmpt_code values used to tell "sent" from "completed" on timeout. */
+#define CMDQ_SEND_CMPT_CODE 10
+#define CMDQ_COMPLETE_CMPT_CODE 11
+
+#define HINIC_GET_CMDQ_FREE_WQEBBS(cmdq_wq) \
+ atomic_read(&(cmdq_wq)->delta)
+
+/* Short-command opcode carried in SCMD WQEs. */
+enum cmdq_scmd_type {
+ CMDQ_SET_ARM_CMD = 2,
+};
+
+/* Long (SGE-based) vs short (inline) WQE formats. */
+enum cmdq_wqe_type {
+ WQE_LCMD_TYPE,
+ WQE_SCMD_TYPE,
+};
+
+/* Control section length (in 8-byte units) encoded in header_info. */
+enum ctrl_sect_len {
+ CTRL_SECT_LEN = 1,
+ CTRL_DIRECT_SECT_LEN = 2,
+};
+
+/* Buffer-descriptor length encoded in header_info; also used to tell
+ * LCMD from SCMD WQEs back in cmdq_get_wqe_size().
+ */
+enum bufdesc_len {
+ BUFDESC_LCMD_LEN = 2,
+ BUFDESC_SCMD_LEN = 3,
+};
+
+/* Command data carried by SGE reference vs inline in the WQE. */
+enum data_format {
+ DATA_SGE,
+ DATA_DIRECT,
+};
+
+/* Completion written directly into the WQE vs DMA'd to a response SGE. */
+enum completion_format {
+ COMPLETE_DIRECT,
+ COMPLETE_SGE,
+};
+
+enum completion_request {
+ CEQ_SET = 1,
+};
+
+/* Caller-visible command flavors; selects the WQE completion setup. */
+enum cmdq_cmd_type {
+ SYNC_CMD_DIRECT_RESP,
+ SYNC_CMD_SGE_RESP,
+ ASYNC_CMD,
+};
+
+/* A cmdq is idle when every WQEBB is free, i.e. the free count (delta)
+ * equals the full queue depth.
+ */
+bool hinic_cmdq_idle(struct hinic_cmdq *cmdq)
+{
+ return atomic_read(&cmdq->wq->delta) == cmdq->wq->q_depth;
+}
+
+/* Allocate a DMA-coherent command buffer from the cmdq buffer pool.
+ * May be called from atomic context (GFP_ATOMIC). Returns NULL on
+ * failure; the caller releases it with hinic_free_cmd_buf().
+ */
+struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_cmd_buf *cmd_buf;
+
+ if (!dev) {
+ pr_err("Failed to alloc cmd buf, invalid hwdev\n");
+ return NULL;
+ }
+
+ cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC);
+ if (!cmd_buf)
+ return NULL;
+
+ cmd_buf->buf = dma_pool_alloc(dev->cmdqs->cmd_buf_pool, GFP_ATOMIC,
+ &cmd_buf->dma_addr);
+ if (!cmd_buf->buf) {
+ sdk_err(dev->dev_hdl, "Failed to allocate cmdq cmd buf from the pool\n");
+ kfree(cmd_buf);
+ return NULL;
+ }
+
+ return cmd_buf;
+}
+EXPORT_SYMBOL(hinic_alloc_cmd_buf);
+
+/* Return a command buffer to the cmdq DMA pool and free its wrapper. */
+void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!dev || !cmd_buf) {
+ pr_err("Failed to free cmd buf\n");
+ return;
+ }
+
+ dma_pool_free(dev->cmdqs->cmd_buf_pool, cmd_buf->buf,
+ cmd_buf->dma_addr);
+ kfree(cmd_buf);
+}
+EXPORT_SYMBOL(hinic_free_cmd_buf);
+
+/* Map a WQE type to its size in bytes; 0 for an unknown type. */
+static int cmdq_wqe_size(enum cmdq_wqe_type wqe_type)
+{
+ if (wqe_type == WQE_LCMD_TYPE)
+ return WQE_LCMD_SIZE;
+ if (wqe_type == WQE_SCMD_TYPE)
+ return WQE_SCMD_SIZE;
+
+ return 0;
+}
+
+/* Recover the WQE size from the bufdesc length stored in header_info
+ * (the inverse of what cmdq_prepare_wqe_ctrl() wrote); 0 if unknown.
+ */
+static int cmdq_get_wqe_size(enum bufdesc_len len)
+{
+ if (len == BUFDESC_LCMD_LEN)
+ return WQE_LCMD_SIZE;
+ if (len == BUFDESC_SCMD_LEN)
+ return WQE_SCMD_SIZE;
+
+ return 0;
+}
+
+/* Point the WQE's completion SGE at the caller's response buffer so the
+ * hardware can DMA the command response into it.
+ */
+static void cmdq_set_completion(struct hinic_cmdq_completion *complete,
+ struct hinic_cmd_buf *buf_out)
+{
+ struct hinic_sge_resp *sge_resp = &complete->sge_resp;
+
+ hinic_set_sge(&sge_resp->sge, buf_out->dma_addr,
+ HINIC_CMDQ_BUF_SIZE);
+}
+
+/* Fill the long-command buffer descriptor SGE with the request buffer. */
+static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe,
+ struct hinic_cmd_buf *buf_in)
+{
+ hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size);
+}
+
+/* Copy the request payload directly into an inline (short-command) WQE.
+ * NOTE(review): in_size is not bounded here against the inline data
+ * area; callers are presumably expected to pass a small payload --
+ * confirm against struct hinic_cmdq_wqe_scmd's buf_desc capacity.
+ */
+static void cmdq_set_inline_wqe_data(struct hinic_cmdq_inline_wqe *wqe,
+ const void *buf_in, u32 in_size)
+{
+ struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;
+
+ wqe_scmd->buf_desc.buf_len = in_size;
+ memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
+}
+
+/* Assemble the doorbell word: high 8 bits of the producer index plus
+ * queue/cmdq/source type fields (the low PI bits select the doorbell
+ * address instead, see CMDQ_DB_ADDR()).
+ */
+static void cmdq_fill_db(struct hinic_cmdq_db *db,
+ enum hinic_cmdq_type cmdq_type, u16 prod_idx)
+{
+ db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
+ CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) |
+ CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
+ CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
+}
+
+/* Ring the cmdq doorbell for the given producer index. The wmb()
+ * guarantees the WQE contents written earlier are visible to the device
+ * before the doorbell write triggers processing.
+ */
+static void cmdq_set_db(struct hinic_cmdq *cmdq,
+ enum hinic_cmdq_type cmdq_type, u16 prod_idx)
+{
+ struct hinic_cmdq_db db;
+
+ cmdq_fill_db(&db, cmdq_type, prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ db.db_info = cpu_to_be32(db.db_info);
+
+ wmb(); /* write all before the doorbell */
+ writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
+}
+
+/* Copy a prepared WQE into the in-queue slot. The first 8 bytes contain
+ * the HW busy bit, so they are written last (after a wmb()) to ensure
+ * the device never sees a valid header with a half-written body.
+ */
+static void cmdq_wqe_fill(void *dst, const void *src)
+{
+ memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST,
+ (u8 *)src + FIRST_DATA_TO_WRITE_LAST,
+ CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
+
+ wmb(); /* The first 8 bytes should be written last */
+
+ *(u64 *)dst = *(u64 *)src;
+}
+
+/* Build the ctrl and header sections of a cmdq WQE.
+ *
+ * @wrapped: current queue wrap flag; stored in the header's HW busy bit
+ *           so hardware can detect stale WQEs after wrap-around.
+ * The ctrl/status location depends on the data format: SGE data uses
+ * the long-command layout, direct data uses the inline layout.
+ */
+static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
+ enum completion_format complete_format,
+ enum data_format data_format,
+ enum bufdesc_len buf_len)
+{
+ struct hinic_ctrl *ctrl;
+ enum ctrl_sect_len ctrl_len;
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd;
+ u32 saved_data = WQE_HEADER(wqe)->saved_data;
+
+ if (data_format == DATA_SGE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+
+ wqe_lcmd->status.status_info = 0;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_len = CTRL_SECT_LEN;
+ } else {
+ wqe_scmd = &wqe->inline_wqe.wqe_scmd;
+
+ wqe_scmd->status.status_info = 0;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_len = CTRL_DIRECT_SECT_LEN;
+ }
+
+ ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) |
+ CMDQ_CTRL_SET(cmd, CMD) |
+ CMDQ_CTRL_SET(mod, MOD) |
+ CMDQ_CTRL_SET(ack_type, ACK_TYPE);
+
+ WQE_HEADER(wqe)->header_info =
+ CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
+ CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
+ CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
+ CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
+ CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
+ CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
+ CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT);
+
+ /* Clear the stale ARM flag in saved_data, then set it only for the
+ * SET_ARM command. (The original "saved_data &= SAVED_DATA_CLEAR(..)"
+ * was a redundant self-AND duplicated in both branches.)
+ */
+ saved_data = SAVED_DATA_CLEAR(saved_data, ARM);
+ if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM)
+ WQE_HEADER(wqe)->saved_data = saved_data |
+ SAVED_DATA_SET(1, ARM);
+ else
+ WQE_HEADER(wqe)->saved_data = saved_data;
+}
+
+/* Build a complete long-command (SGE data) WQE for the given command
+ * type. For ASYNC_CMD the request buffer pointer is stashed in the WQE
+ * (saved_async_buf) so the CEQ handler can free it on completion.
+ */
+static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ struct hinic_cmd_buf *buf_in,
+ struct hinic_cmd_buf *buf_out, int wrapped,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+ enum completion_format complete_format = COMPLETE_DIRECT;
+
+ switch (cmd_type) {
+ case SYNC_CMD_SGE_RESP:
+ if (buf_out) {
+ complete_format = COMPLETE_SGE;
+ cmdq_set_completion(&wqe_lcmd->completion, buf_out);
+ }
+ break;
+ case SYNC_CMD_DIRECT_RESP:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+ break;
+ case ASYNC_CMD:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+
+ /* go through uintptr_t to avoid a pointer-to-u64 cast
+ * warning on 32-bit builds; unwound in
+ * cmdq_async_cmd_handler()
+ */
+ wqe_lcmd->buf_desc.saved_async_buf = (u64)(uintptr_t)buf_in;
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
+ prod_idx, complete_format, DATA_SGE,
+ BUFDESC_LCMD_LEN);
+
+ cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
+}
+
+/* Build a complete short-command (inline data) WQE.
+ * NOTE(review): unlike cmdq_set_lcmd_wqe(), the SGE_RESP path does not
+ * NULL-check buf_out before cmdq_set_completion() dereferences it --
+ * confirm callers never pass NULL with SYNC_CMD_SGE_RESP.
+ */
+static void cmdq_set_inline_wqe(struct hinic_cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ void *buf_in, u16 in_size,
+ struct hinic_cmd_buf *buf_out, int wrapped,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
+{
+ struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->inline_wqe.wqe_scmd;
+ enum completion_format complete_format = COMPLETE_DIRECT;
+
+ switch (cmd_type) {
+ case SYNC_CMD_SGE_RESP:
+ complete_format = COMPLETE_SGE;
+ cmdq_set_completion(&wqe_scmd->completion, buf_out);
+ break;
+ case SYNC_CMD_DIRECT_RESP:
+ complete_format = COMPLETE_DIRECT;
+ wqe_scmd->completion.direct_resp = 0;
+ break;
+ default:
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
+ complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);
+
+ cmdq_set_inline_wqe_data(&wqe->inline_wqe, buf_in, in_size);
+}
+
+/* Propagate a completed WQE's error code and (for direct-response
+ * commands) its 64-bit response into the caller-provided slots stored
+ * in cmd_infos[prod_idx]. Called under cmdq_lock.
+ * NOTE(review): cpu_to_be64 is used to convert a value coming FROM
+ * hardware; byte-swap-wise it is identical to be64_to_cpu, but the
+ * intent reads inverted -- confirm against consumers of direct_resp.
+ */
+static void cmdq_update_cmd_status(struct hinic_cmdq *cmdq, u16 prod_idx,
+ struct hinic_cmdq_wqe *wqe)
+{
+ struct hinic_cmdq_cmd_info *cmd_info;
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ u32 status_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ cmd_info = &cmdq->cmd_infos[prod_idx];
+
+ if (cmd_info->errcode) {
+ status_info = be32_to_cpu(wqe_lcmd->status.status_info);
+ *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL);
+ }
+
+ if (cmd_info->direct_resp &&
+ cmd_info->cmd_type == HINIC_CMD_TYPE_DIRECT_RESP)
+ *cmd_info->direct_resp =
+ cpu_to_be64(wqe_lcmd->completion.direct_resp);
+}
+
+/* After a sync command timed out, re-inspect the WQE: if hardware did
+ * set the busy/completed bit the command actually finished (a "fake
+ * timeout" -- the CEQ event was missed), so harvest its status and
+ * return 0; otherwise return -EFAULT. Called under cmdq_lock.
+ */
+static int hinic_cmdq_sync_timeout_check(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe, u16 pi,
+ enum hinic_mod_type mod, u8 cmd)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_ctrl *ctrl;
+ u32 ctrl_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+ if (!WQE_COMPLETED(ctrl_info)) {
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check busy bit not set, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ return -EFAULT;
+ }
+
+ cmdq_update_cmd_status(cmdq, pi, wqe);
+
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeed, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ return 0;
+}
+
+/* Detach the caller's stack-based result pointers from a cmd_info slot
+ * after a timeout, so a late CEQ completion cannot write through
+ * dangling pointers. Pointer equality guards against clearing slots
+ * already reused by another command. Called under cmdq_lock.
+ */
+static void __clear_cmd_info(struct hinic_cmdq_cmd_info *cmd_info,
+ const int *errcode, struct completion *done,
+ u64 *out_param)
+{
+ if (cmd_info->errcode == errcode)
+ cmd_info->errcode = NULL;
+
+ if (cmd_info->done == done)
+ cmd_info->done = NULL;
+
+ if (cmd_info->direct_resp == out_param)
+ cmd_info->direct_resp = NULL;
+}
+
+/* Last-chance retry after a sync command timeout: re-arm and
+ * re-schedule the CEQ (in case the completion event was lost) and wait
+ * once more, up to CMDQ_CMD_RETRY_TIMEOUT ms. Returns 0 if the
+ * completion finally arrived.
+ */
+static int cmdq_retry_get_ack(struct hinic_hwdev *hwdev,
+ struct completion *done, u8 ceq_id)
+{
+ ulong timeo = msecs_to_jiffies(CMDQ_CMD_RETRY_TIMEOUT);
+ int err;
+
+ init_completion(done);
+
+ err = hinic_reschedule_eq(hwdev, HINIC_CEQ, ceq_id);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(done, timeo))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* Post a synchronous long-command WQE and wait for its completion,
+ * returning the 64-bit direct response via *out_param.
+ *
+ * @timeout: wait budget in ms; 0 selects CMDQ_CMD_TIMEOUT.
+ * Returns 0 on success, -EBUSY if the queue is full, -ETIMEDOUT on an
+ * unrecoverable timeout, or the firmware error code.
+ * Must be called from sleepable context (waits on a completion); the
+ * queue itself is protected by cmdq_lock (bh, shared with the CEQ
+ * tasklet).
+ */
+static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ u64 *out_param, u32 timeout)
+{
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_cmdq_wqe *curr_wqe, wqe;
+ struct hinic_cmdq_cmd_info *cmd_info;
+ struct completion done;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ int cmpt_code = CMDQ_SEND_CMPT_CODE;
+ ulong timeo;
+ u64 curr_msg_id;
+ int err;
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* in order to save a wqebb for setting arm_bit when
+ * send cmdq commands frequently resulting in cmdq full
+ */
+ if (HINIC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
+ curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+
+ init_completion(&done);
+
+ /* expose stack-based result slots to the CEQ handler */
+ cmd_info->done = &done;
+ cmd_info->errcode = &errcode;
+ cmd_info->direct_resp = out_param;
+ cmd_info->cmpt_code = &cmpt_code;
+
+ cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
+ wrapped, ack_type, mod, cmd, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hinic_cpu_to_be32(&wqe, wqe_size);
+
+ /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmd_info->cmd_type = HINIC_CMD_TYPE_DIRECT_RESP;
+
+ /* msg id distinguishes this use of the slot from a later reuse */
+ (cmd_info->cmdq_msg_id)++;
+ curr_msg_id = cmd_info->cmdq_msg_id;
+
+ cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+ if (!wait_for_completion_timeout(&done, timeo) &&
+ cmdq_retry_get_ack(cmdq->hwdev, &done, HINIC_CEQ_ID_CMDQ)) {
+ /* timeout path: re-check under the lock whether the command
+ * actually completed while we were giving up
+ */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ if (cmd_info->cmpt_code == &cmpt_code)
+ cmd_info->cmpt_code = NULL;
+
+ if (cmpt_code == CMDQ_COMPLETE_CMPT_CODE) {
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq direct sync command has been completed\n");
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ goto timeout_check_ok;
+ }
+
+ if (curr_msg_id == cmd_info->cmdq_msg_id) {
+ err = hinic_cmdq_sync_timeout_check(cmdq, curr_wqe,
+ curr_prod_idx,
+ mod, cmd);
+ if (err)
+ cmd_info->cmd_type = HINIC_CMD_TYPE_TIMEOUT;
+ else
+ cmd_info->cmd_type =
+ HINIC_CMD_TYPE_FAKE_TIMEOUT;
+ } else {
+ err = -ETIMEDOUT;
+ sdk_err(cmdq->hwdev->dev_hdl,
+ "Cmdq sync command current msg id dismatch with cmd_info msg id, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ }
+
+ __clear_cmd_info(cmd_info, &errcode, &done, out_param);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ if (!err)
+ goto timeout_check_ok;
+
+ sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, mod: %d cmd: 0x%x prod idx: 0x%x\n",
+ mod, cmd, curr_prod_idx);
+ hinic_dump_ceq_info(cmdq->hwdev);
+ return -ETIMEDOUT;
+ }
+
+timeout_check_ok:
+ smp_rmb(); /* read error code after completion */
+
+ /* NOTE(review): errcode values 0 and 1 are both treated as success
+ * here -- confirm firmware's error-code convention.
+ */
+ if (errcode > 1)
+ return errcode;
+
+ return 0;
+}
+
+/* Post a synchronous long-command WQE whose response is DMA'd into
+ * buf_out (SGE response), and wait for completion.
+ *
+ * @timeout: wait budget in ms; 0 selects CMDQ_CMD_TIMEOUT.
+ * Returns 0 on success, -EBUSY if the queue is full, -ETIMEDOUT on an
+ * unrecoverable timeout, or the firmware error code.
+ * Fixes vs. original: the timeout path now calls __clear_cmd_info()
+ * (it previously duplicated its body inline, unlike the direct_resp
+ * path), and the "avalible" typo in the log message is corrected.
+ */
+static int cmdq_sync_cmd_detail_resp(struct hinic_cmdq *cmdq,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ struct hinic_cmd_buf *buf_out,
+ u32 timeout)
+{
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_cmdq_wqe *curr_wqe, wqe;
+ struct hinic_cmdq_cmd_info *cmd_info;
+ struct completion done;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ int cmpt_code = CMDQ_SEND_CMPT_CODE;
+ ulong timeo;
+ u64 curr_msg_id;
+ int err;
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* in order to save a wqebb for setting arm_bit when
+ * send cmdq commands frequently resulting in cmdq full
+ */
+ if (HINIC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
+ curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ sdk_err(cmdq->hwdev->dev_hdl, "Can not get available wqebb, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+
+ init_completion(&done);
+
+ /* expose stack-based result slots to the CEQ handler */
+ cmd_info->done = &done;
+ cmd_info->errcode = &errcode;
+ cmd_info->cmpt_code = &cmpt_code;
+
+ cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out,
+ wrapped, ack_type, mod, cmd, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hinic_cpu_to_be32(&wqe, wqe_size);
+
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmd_info->cmd_type = HINIC_CMD_TYPE_SGE_RESP;
+
+ (cmd_info->cmdq_msg_id)++;
+ curr_msg_id = cmd_info->cmdq_msg_id;
+
+ cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+ if (!wait_for_completion_timeout(&done, timeo) &&
+ cmdq_retry_get_ack(cmdq->hwdev, &done, HINIC_CEQ_ID_CMDQ)) {
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ if (cmd_info->cmpt_code == &cmpt_code)
+ cmd_info->cmpt_code = NULL;
+
+ if (cmpt_code == CMDQ_COMPLETE_CMPT_CODE) {
+ sdk_info(cmdq->hwdev->dev_hdl, "Cmdq detail sync command has been completed\n");
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ goto timeout_check_ok;
+ }
+
+ if (curr_msg_id == cmd_info->cmdq_msg_id) {
+ err = hinic_cmdq_sync_timeout_check(cmdq, curr_wqe,
+ curr_prod_idx,
+ mod, cmd);
+ if (err)
+ cmd_info->cmd_type = HINIC_CMD_TYPE_TIMEOUT;
+ else
+ cmd_info->cmd_type =
+ HINIC_CMD_TYPE_FAKE_TIMEOUT;
+ } else {
+ err = -ETIMEDOUT;
+ sdk_err(cmdq->hwdev->dev_hdl,
+ "Cmdq sync command current msg id dismatch with cmd_info msg id, mod: %u, cmd: 0x%x\n",
+ mod, cmd);
+ }
+
+ /* no direct_resp was registered, so passing NULL is a no-op
+ * for that field
+ */
+ __clear_cmd_info(cmd_info, &errcode, &done, NULL);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ if (!err)
+ goto timeout_check_ok;
+
+ sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, mod: %d cmd: 0x%x prod idx: 0x%x\n",
+ mod, cmd, curr_prod_idx);
+ hinic_dump_ceq_info(cmdq->hwdev);
+ return -ETIMEDOUT;
+ }
+
+timeout_check_ok:
+ smp_rmb(); /* read error code after completion */
+
+ if (errcode > 1)
+ return errcode;
+
+ return 0;
+}
+
+/* Post a fire-and-forget long-command WQE on the async cmdq. No
+ * completion is awaited; buf_in ownership transfers to the WQE
+ * (saved_async_buf) and is freed by cmdq_async_cmd_handler() when the
+ * CEQ reports completion. Returns 0 or -EBUSY if the queue is full.
+ */
+static int cmdq_async_cmd(struct hinic_cmdq *cmdq, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in)
+{
+ struct hinic_wq *wq = cmdq->wq;
+ int wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ struct hinic_cmdq_wqe *curr_wqe, wqe;
+ int wrapped;
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
+ curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= cmdq->wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= cmdq->wq->q_depth;
+ }
+
+ cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped,
+ ack_type, mod, cmd, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hinic_cpu_to_be32(&wqe, wqe_size);
+
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_ASYNC;
+
+ cmdq_set_db(cmdq, HINIC_CMDQ_ASYNC, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return 0;
+}
+
+/* Post an inline SET_ARM short-command WQE on this cmdq. Fire-and-
+ * forget: completion is consumed by cmdq_arm_ceq_handler(). Returns 0
+ * or -EBUSY if no WQEBB is available.
+ */
+static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in, u16 in_size)
+{
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_cmdq_wqe *curr_wqe, wqe;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped, wqe_size = cmdq_wqe_size(WQE_SCMD_TYPE);
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */
+ spin_lock_bh(&cmdq->cmdq_lock);
+
+ /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
+ curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb setting arm\n");
+ return -EBUSY;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmdq_set_inline_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, in_size, NULL,
+ wrapped, HINIC_ACK_TYPE_CMDQ, HINIC_MOD_COMM,
+ CMDQ_SET_ARM_CMD, curr_prod_idx);
+
+ /* The data that is written to HW should be in Big Endian Format */
+ hinic_cpu_to_be32(&wqe, wqe_size);
+
+ /* cmdq wqe is not shadow, therefore wqe will be written to wq */
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_SET_ARM;
+
+ cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx);
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return 0;
+}
+
+/* Validate the common arguments of the public cmdq entry points:
+ * non-NULL hwdev/buffer and a buffer size within (0, MAX_DATA_SIZE].
+ */
+static int cmdq_params_valid(void *hwdev, struct hinic_cmd_buf *buf_in)
+{
+ if (!hwdev || !buf_in) {
+ pr_err("Invalid CMDQ buffer addr\n");
+ return -EINVAL;
+ }
+
+ if (buf_in->size == 0 || buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
+ pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define WAIT_CMDQ_ENABLE_TIMEOUT 300
+
+/* Wait up to WAIT_CMDQ_ENABLE_TIMEOUT ms for the cmdqs to become
+ * enabled. Returns 0 once enabled; -EBUSY (and latches disable_flag)
+ * on timeout, chip removal, or a prior disable.
+ * Fix vs. original: sleep between polls instead of a tight busy-wait
+ * that pinned a CPU for up to 300 ms. Sleeping is safe here: all
+ * callers subsequently sleep in wait_for_completion_timeout(), so this
+ * runs in process context.
+ */
+static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
+ do {
+ if (cmdqs->status & HINIC_CMDQ_ENABLE)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag &&
+ !cmdqs->disable_flag);
+
+ cmdqs->disable_flag = 1;
+
+ return -EBUSY;
+}
+
+/* Public entry: send a synchronous cmdq command and return its 64-bit
+ * direct response in *out_param. Validates arguments, requires the
+ * chip present and the CMDQ module initialized, and serializes with
+ * func ownership (hinic_func_own_get). @timeout is in ms (0 = default).
+ */
+int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in, u64 *out_param,
+ u32 timeout)
+{
+ struct hinic_cmdqs *cmdqs;
+ int err = cmdq_params_valid(hwdev, buf_in);
+
+ if (err) {
+ pr_err("Invalid CMDQ parameters\n");
+ return err;
+ }
+
+ cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+ !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n");
+ return err;
+ }
+
+ err = hinic_func_own_get(hwdev);
+ if (err)
+ return err;
+
+ err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], ack_type,
+ mod, cmd, buf_in, out_param, timeout);
+ hinic_func_own_free(hwdev);
+ /* if the chip vanished while we waited, report timeout instead */
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+ return -ETIMEDOUT;
+ else
+ return err;
+}
+EXPORT_SYMBOL(hinic_cmdq_direct_resp);
+
+/* Public entry: send a synchronous cmdq command whose response is
+ * DMA'd into buf_out. @timeout is in ms (0 = default).
+ * NOTE(review): unlike hinic_cmdq_direct_resp(), this path does not
+ * take hinic_func_own_get() -- confirm whether that is intentional.
+ */
+int hinic_cmdq_detail_resp(void *hwdev,
+ enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ struct hinic_cmd_buf *buf_out,
+ u32 timeout)
+{
+ struct hinic_cmdqs *cmdqs;
+ int err = cmdq_params_valid(hwdev, buf_in);
+
+ if (err)
+ return err;
+
+ cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+ !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n");
+ return err;
+ }
+
+ err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], ack_type,
+ mod, cmd, buf_in, buf_out, timeout);
+ /* if the chip vanished while we waited, report timeout instead */
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag))
+ return -ETIMEDOUT;
+ else
+ return err;
+}
+EXPORT_SYMBOL(hinic_cmdq_detail_resp);
+
+/* Public entry: post an asynchronous cmdq command. buf_in ownership
+ * transfers on success and is freed by the CEQ completion handler.
+ */
+int hinic_cmdq_async(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in)
+{
+ struct hinic_cmdqs *cmdqs;
+ int err = cmdq_params_valid(hwdev, buf_in);
+
+ if (err)
+ return err;
+
+ cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+ !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n");
+ return err;
+ }
+
+ return cmdq_async_cmd(&cmdqs->cmdq[HINIC_CMDQ_ASYNC], ack_type, mod,
+ cmd, buf_in);
+}
+EXPORT_SYMBOL(hinic_cmdq_async);
+
+/* Public entry: arm the event notification for a queue. For
+ * HINIC_SET_ARM_CMDQ, q_id selects which cmdq to post on; other queue
+ * types post on the sync cmdq.
+ */
+int hinic_set_arm_bit(void *hwdev, enum hinic_set_arm_type q_type, u16 q_id)
+{
+ struct hinic_cmdqs *cmdqs;
+ struct hinic_cmdq *cmdq;
+ struct hinic_cmdq_arm_bit arm_bit;
+ enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC;
+ u16 in_size;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+ !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED))
+ return -EPERM;
+
+ cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
+
+ if (!(cmdqs->status & HINIC_CMDQ_ENABLE))
+ return -EBUSY;
+
+ if (q_type == HINIC_SET_ARM_CMDQ) {
+ if (q_id >= HINIC_MAX_CMDQ_TYPES)
+ return -EFAULT;
+
+ cmdq_type = q_id;
+ }
+ /* sq is using interrupt now, so we only need to set arm bit for cmdq,
+ * remove comment below if need to set sq arm bit
+ * else
+ * cmdq_type = HINIC_CMDQ_SYNC;
+ */
+
+ cmdq = &cmdqs->cmdq[cmdq_type];
+
+ arm_bit.q_type = q_type;
+ arm_bit.q_id = q_id;
+ in_size = sizeof(arm_bit);
+
+ err = cmdq_set_arm_bit(cmdq, &arm_bit, in_size);
+ if (err) {
+ sdk_err(cmdqs->hwdev->dev_hdl,
+ "Failed to set arm for q_type: %d, qid %d\n",
+ q_type, q_id);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_arm_bit);
+
+/* Retire a completed WQE: clear its HW busy bit and cmd_info slot, then
+ * return its WQEBBs to the work queue. The WQE layout (lcmd vs scmd)
+ * is recovered from the bufdesc length in the big-endian header.
+ */
+static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe, u16 ci)
+{
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_cmdq_inline_wqe *inline_wqe;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd;
+ struct hinic_ctrl *ctrl;
+ u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info);
+ int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
+ int wqe_size = cmdq_get_wqe_size(buf_len);
+ u16 num_wqebbs;
+
+ if (wqe_size == WQE_LCMD_SIZE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ } else {
+ inline_wqe = &wqe->inline_wqe;
+ wqe_scmd = &inline_wqe->wqe_scmd;
+ ctrl = &wqe_scmd->ctrl;
+ }
+
+ /* clear HW busy bit */
+ ctrl->ctrl_info = 0;
+ cmdq->cmd_infos[ci].cmd_type = HINIC_CMD_TYPE_NONE;
+
+ wmb(); /* verify wqe is clear */
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
+ hinic_put_wqe(cmdq->wq, num_wqebbs);
+}
+
+/* CEQ-side completion of a synchronous command: publish status/response
+ * into the waiter's slots, flag cmpt_code as COMPLETED (so a racing
+ * timeout path knows the command finished), wake the waiter, and retire
+ * the WQE. Runs in tasklet context; uses non-bh spin_lock since bh is
+ * already disabled here.
+ */
+static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe, u16 cons_idx)
+{
+ u16 prod_idx = cons_idx;
+
+ spin_lock(&cmdq->cmdq_lock);
+
+ cmdq_update_cmd_status(cmdq, prod_idx, wqe);
+
+ if (cmdq->cmd_infos[prod_idx].cmpt_code) {
+ *cmdq->cmd_infos[prod_idx].cmpt_code =
+ CMDQ_COMPLETE_CMPT_CODE;
+ cmdq->cmd_infos[prod_idx].cmpt_code = NULL;
+ }
+
+ /* make sure cmpt_code operation before done operation */
+ smp_rmb();
+
+ if (cmdq->cmd_infos[prod_idx].done) {
+ complete(cmdq->cmd_infos[prod_idx].done);
+ cmdq->cmd_infos[prod_idx].done = NULL;
+ }
+
+ spin_unlock(&cmdq->cmdq_lock);
+
+ clear_wqe_complete_bit(cmdq, wqe, cons_idx);
+}
+
+/* CEQ-side completion of an async command: recover the request buffer
+ * pointer stashed in the WQE (stored big-endian by hinic_cpu_to_be32),
+ * free it, and retire the WQE.
+ */
+static void cmdq_async_cmd_handler(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe, u16 ci)
+{
+ u64 buf = wqe->wqe_lcmd.buf_desc.saved_async_buf;
+ int addr_sz = sizeof(u64);
+
+ hinic_be32_to_cpu((void *)&buf, addr_sz);
+ if (buf)
+ /* uintptr_t intermediary avoids a u64-to-pointer cast
+ * warning on 32-bit builds (mirrors cmdq_set_lcmd_wqe())
+ */
+ hinic_free_cmd_buf(hwdev,
+ (struct hinic_cmd_buf *)(uintptr_t)buf);
+
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+}
+
+/* CEQ-side completion of a SET_ARM inline WQE: retire it once hardware
+ * has set the busy/completed bit; -EBUSY if it is not yet complete.
+ */
+static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_wqe *wqe, u16 ci)
+{
+ struct hinic_cmdq_inline_wqe *inline_wqe = &wqe->inline_wqe;
+ struct hinic_cmdq_wqe_scmd *wqe_scmd = &inline_wqe->wqe_scmd;
+ struct hinic_ctrl *ctrl = &wqe_scmd->ctrl;
+ u32 ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+
+ if (!WQE_COMPLETED(ctrl_info))
+ return -EBUSY;
+
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+
+ return 0;
+}
+
+#define HINIC_CMDQ_WQE_HEAD_LEN 32
+/* Dump the first 32 bytes of a cmdq WQE for debugging, four dwords per
+ * log line (the dwords are printed raw, in their stored byte order).
+ */
+static void hinic_dump_cmdq_wqe_head(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq_wqe *wqe)
+{
+ u32 *dword = (u32 *)wqe;
+ u32 idx;
+
+ for (idx = 0; idx < HINIC_CMDQ_WQE_HEAD_LEN / sizeof(u32); idx += 4)
+ sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ dword[idx], dword[idx + 1], dword[idx + 2],
+ dword[idx + 3]);
+}
+
+/* CEQ callback for cmdq completions. Walks completed WQEs on the queue
+ * selected by @ceqe_data and dispatches per command type; re-arms the CEQ
+ * unless a SET_ARM command is the last thing processed.
+ * @handle: struct hinic_hwdev * passed as void * by the EQ layer
+ * @ceqe_data: raw CEQ entry; encodes the cmdq type
+ */
+void hinic_cmdq_ceq_handler(void *handle, u32 ceqe_data)
+{
+ struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)handle)->cmdqs;
+ enum hinic_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE);
+ struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
+ struct hinic_hwdev *hwdev = cmdqs->hwdev;
+ struct hinic_cmdq_wqe *wqe;
+ struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic_ctrl *ctrl;
+ struct hinic_cmdq_cmd_info *cmd_info;
+ u32 ctrl_info;
+ u16 ci;
+ int set_arm = 1;
+
+ while ((wqe = hinic_read_wqe(cmdq->wq, 1, &ci)) != NULL) {
+ cmd_info = &cmdq->cmd_infos[ci];
+
+ if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) {
+ /* slot already reclaimed - nothing more to process */
+ set_arm = 1;
+ break;
+ } else if (cmd_info->cmd_type == HINIC_CMD_TYPE_TIMEOUT ||
+ cmd_info->cmd_type == HINIC_CMD_TYPE_FAKE_TIMEOUT) {
+ /* the submitter gave up waiting; just log (for real
+  * timeouts) and reclaim the WQE
+  */
+ if (cmd_info->cmd_type == HINIC_CMD_TYPE_TIMEOUT) {
+ sdk_info(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n",
+ cmdq_type, ci);
+ hinic_dump_cmdq_wqe_head(hwdev, wqe);
+ }
+
+ set_arm = 1;
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ } else if (cmd_info->cmd_type == HINIC_CMD_TYPE_SET_ARM) {
+ /* arm_bit was set until here */
+ set_arm = 0;
+
+ if (cmdq_arm_ceq_handler(cmdq, wqe, ci))
+ break;
+ } else {
+ set_arm = 1;
+
+ /* only arm bit is using scmd wqe, the wqe is lcmd */
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
+
+ if (!WQE_COMPLETED(ctrl_info))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the cmdq wqe until we have
+ * verified the command has been processed and
+ * written back.
+ */
+ dma_rmb();
+
+ if (cmdq_type == HINIC_CMDQ_ASYNC)
+ cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci);
+ else
+ cmdq_sync_cmd_handler(cmdq, wqe, ci);
+ }
+ }
+
+ if (set_arm)
+ hinic_set_arm_bit(hwdev, HINIC_SET_ARM_CMDQ, cmdq_type);
+}
+
+/* Build the HW cmdq context (@cmdq_ctxt) that is later pushed to the
+ * management CPU: current WQE page PFN with busy/CEQ attributes, and the
+ * WQ block PFN with the starting consumer index.
+ */
+static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
+ struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_cmdq_ctxt *cmdq_ctxt)
+{
+ struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
+ struct hinic_hwdev *hwdev = cmdqs->hwdev;
+ struct hinic_wq *wq = cmdq->wq;
+ struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+ u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
+ u16 start_ci = (u16)wq->cons_idx;
+
+ /* The data in the HW is in Big Endian Format */
+ wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
+
+ pfn = CMDQ_PFN(wq_first_page_paddr);
+
+ ctxt_info->curr_wqe_page_pfn =
+ CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
+ CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
+ CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
+ CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
+ CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);
+
+ /* If only use one page, use 0-level CLA */
+ /* NOTE(review): for the single-page (0-level CLA) case the wq-page
+  * pfn computed above is deliberately reused as the block pfn below.
+  */
+ if (cmdq->wq->num_q_pages != 1) {
+ cmdq_first_block_paddr = cmdq_pages->cmdq_page_paddr;
+ pfn = CMDQ_PFN(cmdq_first_block_paddr);
+ }
+
+ ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
+ CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
+
+ cmdq_ctxt->func_idx = hinic_global_func_id_hw(hwdev);
+ cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ cmdq_ctxt->cmdq_id = cmdq->cmdq_type;
+}
+
+/* Validate a cmdq context received from a VF.
+ * Returns true when the context is acceptable: the ppf index matches this
+ * PF, the cmdq id is in range, and the VF uses 0-level CLA (single page).
+ */
+bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq_ctxt *cmdq_ctxt)
+{
+ struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+ u64 curr_pg_pfn, wq_block_pfn;
+
+ /* valid ids are 0 .. HINIC_MAX_CMDQ_TYPES - 1, so reject
+  * cmdq_id == HINIC_MAX_CMDQ_TYPES too (was an off-by-one with '>')
+  */
+ if (cmdq_ctxt->ppf_idx != hinic_ppf_idx(hwdev) ||
+ cmdq_ctxt->cmdq_id >= HINIC_MAX_CMDQ_TYPES)
+ return false;
+
+ curr_pg_pfn = CMDQ_CTXT_PAGE_INFO_GET(ctxt_info->curr_wqe_page_pfn,
+ CURR_WQE_PAGE_PFN);
+ wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_GET(ctxt_info->wq_block_pfn,
+ WQ_BLOCK_PFN);
+ /* VF must use 0-level CLA */
+ if (curr_pg_pfn != wq_block_pfn)
+ return false;
+
+ return true;
+}
+
+/* Initialize one cmdq: bind it to its WQ, allocate the per-command info
+ * array and a doorbell page. Returns 0 on success or a negative errno.
+ */
+static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev,
+ struct hinic_wq *wq, enum hinic_cmdq_type q_type)
+{
+ void __iomem *db_base;
+ int err;
+
+ cmdq->hwdev = hwdev;
+ cmdq->wq = wq;
+ cmdq->cmdq_type = q_type;
+ cmdq->wrapped = 1;
+
+ spin_lock_init(&cmdq->cmdq_lock);
+
+ /* one command-info slot per WQEBB in the queue */
+ cmdq->cmd_infos = kcalloc(wq->q_depth, sizeof(*cmdq->cmd_infos),
+ GFP_KERNEL);
+ if (!cmdq->cmd_infos)
+ return -ENOMEM;
+
+ err = hinic_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ kfree(cmdq->cmd_infos);
+ return err;
+ }
+
+ cmdq->db_base = (u8 *)db_base;
+ return 0;
+}
+
+/* Release the resources acquired by init_cmdq(): the doorbell area and the
+ * per-command info array.
+ */
+static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq)
+{
+ hinic_free_db_addr(hwdev, cmdq->db_base, NULL);
+ kfree(cmdq->cmd_infos);
+}
+
+/* Push each cmdq's HW context to the management CPU and mark the cmdqs
+ * enabled. Returns 0 on success, -EFAULT on any mgmt-channel failure.
+ */
+int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ struct hinic_cmdq_ctxt *cmdq_ctxt, cmdq_ctxt_out;
+ enum hinic_cmdq_type cmdq_type;
+ u16 in_size;
+ u16 out_size;
+ int err;
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
+ cmdq_ctxt->func_idx = hinic_global_func_id_hw(hwdev);
+ in_size = sizeof(*cmdq_ctxt);
+ /* out_size is an in/out parameter that the mgmt channel
+  * overwrites with the actual response length; reset it (and
+  * the response buffer) every iteration so later cmdqs don't
+  * see stale values from the previous reply.
+  */
+ out_size = sizeof(cmdq_ctxt_out);
+ memset(&cmdq_ctxt_out, 0, sizeof(cmdq_ctxt_out));
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_CMDQ_CTXT_SET,
+ cmdq_ctxt, in_size,
+ &cmdq_ctxt_out, &out_size, 0);
+ if (err || !out_size || cmdq_ctxt_out.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, cmdq_ctxt_out.status, out_size);
+ return -EFAULT;
+ }
+ }
+
+ cmdqs->status |= HINIC_CMDQ_ENABLE;
+ cmdqs->disable_flag = 0;
+
+ return 0;
+}
+
+/* Abort every in-flight command on @cmdq: free saved async buffers on the
+ * async queue, complete any waiters on the sync queue. Called on reset /
+ * teardown paths with the queue quiesced from the HW side.
+ */
+void hinic_cmdq_flush_cmd(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq *cmdq)
+{
+ struct hinic_cmdq_wqe *wqe;
+ struct hinic_cmdq_cmd_info *cmdq_info;
+ u16 ci, wqe_left, i;
+ u64 buf;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ /* delta counts free WQEBBs, so depth - delta is the in-flight count */
+ wqe_left = cmdq->wq->q_depth - (u16)atomic_read(&cmdq->wq->delta);
+ ci = MASKED_WQE_IDX(cmdq->wq, cmdq->wq->cons_idx);
+ for (i = 0; i < wqe_left; i++, ci++) {
+ /* re-mask ci each step so the walk wraps around the ring */
+ ci = MASKED_WQE_IDX(cmdq->wq, ci);
+ cmdq_info = &cmdq->cmd_infos[ci];
+
+ if (cmdq_info->cmd_type == HINIC_CMD_TYPE_SET_ARM)
+ continue;
+
+ if (cmdq->cmdq_type == HINIC_CMDQ_ASYNC) {
+ /* take ownership of the saved buffer pointer (stored
+  * big-endian in the WQE) and free it
+  */
+ wqe = hinic_get_wqebb_addr(cmdq->wq, ci);
+ buf = wqe->wqe_lcmd.buf_desc.saved_async_buf;
+ wqe->wqe_lcmd.buf_desc.saved_async_buf = 0;
+
+ hinic_be32_to_cpu((void *)&buf, sizeof(u64));
+ if (buf)
+ hinic_free_cmd_buf(hwdev,
+ (struct hinic_cmd_buf *)buf);
+ } else {
+ /* wake the waiter; clearing the result pointers keeps
+  * a late completion from writing through them
+  */
+ if (cmdq_info->done) {
+ complete(cmdq_info->done);
+ cmdq_info->done = NULL;
+ cmdq_info->cmpt_code = NULL;
+ cmdq_info->direct_resp = NULL;
+ cmdq_info->errcode = NULL;
+ }
+ }
+ }
+
+ spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+/* Re-initialize all cmdq contexts after a reset: abort in-flight commands,
+ * rewind every queue, then push fresh contexts to the hardware.
+ */
+int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hinic_cmdq_type type;
+
+ for (type = HINIC_CMDQ_SYNC; type < HINIC_MAX_CMDQ_TYPES; type++) {
+ hinic_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[type]);
+ cmdqs->cmdq[type].wrapped = 1;
+ hinic_wq_wqe_pg_clear(cmdqs->cmdq[type].wq);
+ }
+
+ return hinic_set_cmdq_ctxts(hwdev);
+}
+
+/* Allocate and initialize all cmdqs for @hwdev: saved WQ array, command
+ * buffer DMA pool, the WQs themselves, per-queue state and doorbells, and
+ * finally the HW contexts. On failure everything acquired so far is
+ * unwound in reverse order. Returns 0 or a negative errno.
+ */
+int hinic_cmdqs_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs;
+ struct hinic_cmdq_ctxt *cmdq_ctxt;
+ enum hinic_cmdq_type type, cmdq_type;
+ size_t saved_wqs_size;
+ u32 max_wqe_size;
+ int err;
+
+ cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
+ if (!cmdqs)
+ return -ENOMEM;
+
+ hwdev->cmdqs = cmdqs;
+ cmdqs->hwdev = hwdev;
+
+ saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
+ cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL);
+ if (!cmdqs->saved_wqs) {
+ sdk_err(hwdev->dev_hdl, "Failed to allocate saved wqs\n");
+ err = -ENOMEM;
+ goto alloc_wqs_err;
+ }
+
+ cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev->dev_hdl,
+ HINIC_CMDQ_BUF_SIZE,
+ HINIC_CMDQ_BUF_SIZE, 0ULL);
+ if (!cmdqs->cmd_buf_pool) {
+ sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n");
+ err = -ENOMEM;
+ goto pool_create_err;
+ }
+
+ /* size the WQs for the largest (lcmd) WQE format */
+ max_wqe_size = (u32)cmdq_wqe_size(WQE_LCMD_TYPE);
+ err = hinic_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ hwdev->dev_hdl, HINIC_MAX_CMDQ_TYPES,
+ hwdev->wq_page_size, CMDQ_WQEBB_SIZE,
+ HINIC_CMDQ_DEPTH, max_wqe_size);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq\n");
+ goto cmdq_alloc_err;
+ }
+
+ cmdq_type = HINIC_CMDQ_SYNC;
+ for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev,
+ &cmdqs->saved_wqs[cmdq_type], cmdq_type);
+ if (err) {
+ sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type: %d\n",
+ cmdq_type);
+ goto init_cmdq_err;
+ }
+
+ cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
+ cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type],
+ &cmdqs->cmdq_pages, cmdq_ctxt);
+ }
+
+ err = hinic_set_cmdq_ctxts(hwdev);
+ if (err)
+ goto init_cmdq_err;
+
+ return 0;
+
+init_cmdq_err:
+ /* free only the cmdqs that were successfully initialized; when
+  * hinic_set_cmdq_ctxts() fails, cmdq_type == HINIC_MAX_CMDQ_TYPES
+  * so all of them are freed
+  */
+ type = HINIC_CMDQ_SYNC;
+ for (; type < cmdq_type; type++)
+ free_cmdq(hwdev, &cmdqs->cmdq[type]);
+
+ hinic_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ HINIC_MAX_CMDQ_TYPES);
+
+cmdq_alloc_err:
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+
+pool_create_err:
+ kfree(cmdqs->saved_wqs);
+
+alloc_wqs_err:
+ kfree(cmdqs);
+
+ return err;
+}
+
+/* Tear down everything set up by hinic_cmdqs_init(), in reverse order:
+ * disable the cmdqs, abort in-flight commands, then release queues, WQ
+ * memory, the DMA pool and the container itself.
+ */
+void hinic_cmdqs_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hinic_cmdq_type type;
+
+ /* stop accepting new commands first */
+ cmdqs->status &= ~HINIC_CMDQ_ENABLE;
+
+ for (type = HINIC_CMDQ_SYNC; type < HINIC_MAX_CMDQ_TYPES; type++) {
+ hinic_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[type]);
+ free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[type]);
+ }
+
+ hinic_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+ HINIC_MAX_CMDQ_TYPES);
+
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+
+ kfree(cmdqs->saved_wqs);
+
+ kfree(cmdqs);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h
new file mode 100644
index 000000000000..0e0c89aae0d9
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_CMDQ_H_
+#define HINIC_CMDQ_H_
+
+#define HINIC_DB_OFF 0x00000800
+
+#define HINIC_SCMD_DATA_LEN 16
+
+#define HINIC_CMDQ_DEPTH 4096
+
+/* total buffer size; the HW reserves the trailing bytes */
+#define HINIC_CMDQ_BUF_SIZE 2048U
+#define HINIC_CMDQ_BUF_HW_RSVD 8
+#define HINIC_CMDQ_MAX_DATA_SIZE \
+ (HINIC_CMDQ_BUF_SIZE - HINIC_CMDQ_BUF_HW_RSVD)
+
+enum hinic_cmdq_type {
+ HINIC_CMDQ_SYNC,
+ HINIC_CMDQ_ASYNC,
+ HINIC_MAX_CMDQ_TYPES,
+};
+
+enum hinic_db_src_type {
+ HINIC_DB_SRC_CMDQ_TYPE,
+ HINIC_DB_SRC_L2NIC_SQ_TYPE,
+};
+
+enum hinic_cmdq_db_type {
+ HINIC_DB_SQ_RQ_TYPE,
+ HINIC_DB_CMDQ_TYPE,
+};
+
+/* CMDQ WQE CTRLS */
+struct hinic_cmdq_header {
+ u32 header_info;
+ u32 saved_data;
+};
+
+/* inline (small command) buffer descriptor */
+struct hinic_scmd_bufdesc {
+ u32 buf_len;
+ u32 rsvd;
+ u8 data[HINIC_SCMD_DATA_LEN];
+};
+
+/* large command buffer descriptor; saved_async_buf stashes the
+ * hinic_cmd_buf pointer so async completions can free it
+ */
+struct hinic_lcmd_bufdesc {
+ struct hinic_sge sge;
+ u32 rsvd1;
+ u64 saved_async_buf;
+ u64 rsvd3;
+};
+
+struct hinic_cmdq_db {
+ u32 db_info;
+ u32 rsvd;
+};
+
+struct hinic_status {
+ u32 status_info;
+};
+
+struct hinic_ctrl {
+ u32 ctrl_info;
+};
+
+struct hinic_sge_resp {
+ struct hinic_sge sge;
+ u32 rsvd;
+};
+
+struct hinic_cmdq_completion {
+ /* HW Format */
+ union {
+ struct hinic_sge_resp sge_resp;
+ u64 direct_resp;
+ };
+};
+
+/* small-command WQE layout (HW format) */
+struct hinic_cmdq_wqe_scmd {
+ struct hinic_cmdq_header header;
+ struct hinic_cmdq_db db;
+ struct hinic_status status;
+ struct hinic_ctrl ctrl;
+ struct hinic_cmdq_completion completion;
+ struct hinic_scmd_bufdesc buf_desc;
+};
+
+/* large-command WQE layout (HW format) */
+struct hinic_cmdq_wqe_lcmd {
+ struct hinic_cmdq_header header;
+ struct hinic_status status;
+ struct hinic_ctrl ctrl;
+ struct hinic_cmdq_completion completion;
+ struct hinic_lcmd_bufdesc buf_desc;
+};
+
+struct hinic_cmdq_inline_wqe {
+ struct hinic_cmdq_wqe_scmd wqe_scmd;
+};
+
+struct hinic_cmdq_wqe {
+ /* HW Format */
+ union {
+ struct hinic_cmdq_inline_wqe inline_wqe;
+ struct hinic_cmdq_wqe_lcmd wqe_lcmd;
+ };
+};
+
+struct hinic_cmdq_arm_bit {
+ u32 q_type;
+ u32 q_id;
+};
+
+struct hinic_cmdq_ctxt_info {
+ u64 curr_wqe_page_pfn;
+ u64 wq_block_pfn;
+};
+
+/* cmdq context exchanged with the management CPU */
+struct hinic_cmdq_ctxt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 cmdq_id;
+ u8 ppf_idx;
+
+ u8 rsvd1[4];
+
+ struct hinic_cmdq_ctxt_info ctxt_info;
+};
+
+enum hinic_cmdq_status {
+ HINIC_CMDQ_ENABLE = BIT(0),
+};
+
+enum hinic_cmdq_cmd_type {
+ HINIC_CMD_TYPE_NONE,
+ HINIC_CMD_TYPE_SET_ARM,
+ HINIC_CMD_TYPE_DIRECT_RESP,
+ HINIC_CMD_TYPE_SGE_RESP,
+ HINIC_CMD_TYPE_ASYNC,
+ HINIC_CMD_TYPE_TIMEOUT,
+ HINIC_CMD_TYPE_FAKE_TIMEOUT,
+};
+
+/* per-WQEBB bookkeeping; the pointers are owned by the submitter and
+ * cleared by the completion/flush paths
+ */
+struct hinic_cmdq_cmd_info {
+ enum hinic_cmdq_cmd_type cmd_type;
+
+ struct completion *done;
+ int *errcode;
+ int *cmpt_code;
+ u64 *direct_resp;
+ u64 cmdq_msg_id;
+};
+
+struct hinic_cmdq {
+ struct hinic_wq *wq;
+
+ enum hinic_cmdq_type cmdq_type;
+ int wrapped;
+
+ /* spinlock for send cmdq commands */
+ spinlock_t cmdq_lock;
+
+ /* doorbell area */
+ u8 __iomem *db_base;
+
+ struct hinic_cmdq_ctxt cmdq_ctxt;
+
+ struct hinic_cmdq_cmd_info *cmd_infos;
+
+ struct hinic_hwdev *hwdev;
+};
+
+struct hinic_cmdqs {
+ struct hinic_hwdev *hwdev;
+
+ struct dma_pool *cmd_buf_pool;
+
+ struct hinic_wq *saved_wqs;
+
+ struct hinic_cmdq_pages cmdq_pages;
+ struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES];
+
+ u32 status;
+ u32 disable_flag;
+};
+
+void hinic_cmdq_ceq_handler(void *hwdev, u32 ceqe_data);
+
+int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev);
+
+bool hinic_cmdq_idle(struct hinic_cmdq *cmdq);
+
+int hinic_cmdqs_init(struct hinic_hwdev *hwdev);
+
+void hinic_cmdqs_free(struct hinic_hwdev *hwdev);
+
+bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq_ctxt *cmdq_ctxt);
+
+void hinic_cmdq_flush_cmd(struct hinic_hwdev *hwdev,
+ struct hinic_cmdq *cmdq);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.c b/drivers/net/ethernet/huawei/hinic/hinic_common.c
deleted file mode 100644
index 8e9b4a6c88c2..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_common.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#include "hinic_common.h"
-
-/**
- * hinic_cpu_to_be32 - convert data to big endian 32 bit format
- * @data: the data to convert
- * @len: length of data to convert
- **/
-void hinic_cpu_to_be32(void *data, int len)
-{
- u32 *mem = data;
- int i;
-
- len = len / sizeof(u32);
-
- for (i = 0; i < len; i++) {
- *mem = cpu_to_be32(*mem);
- mem++;
- }
-}
-
-/**
- * hinic_be32_to_cpu - convert data from big endian 32 bit format
- * @data: the data to convert
- * @len: length of data to convert
- **/
-void hinic_be32_to_cpu(void *data, int len)
-{
- u32 *mem = data;
- int i;
-
- len = len / sizeof(u32);
-
- for (i = 0; i < len; i++) {
- *mem = be32_to_cpu(*mem);
- mem++;
- }
-}
-
-/**
- * hinic_set_sge - set dma area in scatter gather entry
- * @sge: scatter gather entry
- * @addr: dma address
- * @len: length of relevant data in the dma address
- **/
-void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len)
-{
- sge->hi_addr = upper_32_bits(addr);
- sge->lo_addr = lower_32_bits(addr);
- sge->len = len;
-}
-
-/**
- * hinic_sge_to_dma - get dma address from scatter gather entry
- * @sge: scatter gather entry
- *
- * Return dma address of sg entry
- **/
-dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge)
-{
- return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/hinic_common.h
deleted file mode 100644
index a0de9d9644c6..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_common.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_COMMON_H
-#define HINIC_COMMON_H
-
-#include <linux/types.h>
-
-#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
-#define LOWER_8_BITS(data) ((data) & 0xFF)
-
-struct hinic_sge {
- u32 hi_addr;
- u32 lo_addr;
- u32 len;
-};
-
-void hinic_cpu_to_be32(void *data, int len);
-
-void hinic_be32_to_cpu(void *data, int len);
-
-void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len);
-
-dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_csr.h
new file mode 100644
index 000000000000..045a32d07359
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_csr.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_CSR_H
+#define HINIC_CSR_H
+
+#define HINIC_CSR_GLOBAL_BASE_ADDR 0x4000
+
+/* HW interface registers */
+#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
+#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
+#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8
+#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
+#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
+
+#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100
+#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C
+
+#define HINIC_CSR_DMA_ATTR_TBL_BASE 0xC80
+
+#define HINIC_ELECTION_BASE 0x200
+
+#define HINIC_CSR_DMA_ATTR_TBL_STRIDE 0x4
+#define HINIC_CSR_DMA_ATTR_TBL_ADDR(idx) \
+ (HINIC_CSR_DMA_ATTR_TBL_BASE \
+ + (idx) * HINIC_CSR_DMA_ATTR_TBL_STRIDE)
+
+#define HINIC_PPF_ELECTION_STRIDE 0x4
+#define HINIC_CSR_MAX_PORTS 4
+#define HINIC_CSR_PPF_ELECTION_ADDR \
+ (HINIC_CSR_GLOBAL_BASE_ADDR + HINIC_ELECTION_BASE)
+
+#define HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR \
+ (HINIC_CSR_GLOBAL_BASE_ADDR + HINIC_ELECTION_BASE + \
+ HINIC_CSR_MAX_PORTS * HINIC_PPF_ELECTION_STRIDE)
+
+/* MSI-X registers */
+#define HINIC_CSR_MSIX_CTRL_BASE 0x2000
+#define HINIC_CSR_MSIX_CNT_BASE 0x2004
+
+#define HINIC_CSR_MSIX_STRIDE 0x8
+
+#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \
+ (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
+
+#define HINIC_CSR_MSIX_CNT_ADDR(idx) \
+ (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
+
+/* EQ registers */
+#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200
+#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400
+
+#define HINIC_EQ_MTT_OFF_STRIDE 0x40
+
+#define HINIC_CSR_AEQ_MTT_OFF(id) \
+ (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_MTT_OFF(id) \
+ (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
+
+#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8
+
+#define HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
+ (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
+ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \
+ ((u32)(((type) == HINIC_AEQ) ? \
+ HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) : \
+ HINIC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num)))
+
+#define HINIC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \
+ ((u32)(((type) == HINIC_AEQ) ? \
+ HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) : \
+ HINIC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num)))
+
+#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00
+#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04
+#define HINIC_AEQ_CONS_IDX_0_ADDR_BASE 0xE08
+#define HINIC_AEQ_CONS_IDX_1_ADDR_BASE 0xE0C
+
+#define HINIC_EQ_OFF_STRIDE 0x80
+
+#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \
+ (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \
+ (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \
+ (HINIC_AEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \
+ (HINIC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000
+#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004
+#define HINIC_CEQ_CONS_IDX_0_ADDR_BASE 0x1008
+#define HINIC_CEQ_CONS_IDX_1_ADDR_BASE 0x100C
+
+/* For multi-host mgmt
+ * CEQ_CTRL_0_ADDR: bit26~29: uP write vf mode is normal(0x0),bmgw(0x1),
+ * vmgw(0x2)
+ */
+#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \
+ (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \
+ (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \
+ (HINIC_CEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \
+ (HINIC_CEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
+
+/* API CMD registers */
+#define HINIC_CSR_API_CMD_BASE 0xF000
+
+#define HINIC_CSR_API_CMD_STRIDE 0x100
+
+#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+#define HINIC_CSR_API_CMD_STATUS_0_ADDR(idx) \
+ (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE)
+
+/* VF control registers in pf */
+#define HINIC_PF_CSR_VF_FLUSH_BASE 0x1F400
+#define HINIC_PF_CSR_VF_FLUSH_STRIDE 0x4
+
+#define HINIC_GLB_DMA_SO_RO_REPLACE_ADDR 0x488C
+
+#define HINIC_ICPL_RESERVD_ADDR 0x9204
+
+#define HINIC_PF_CSR_VF_FLUSH_OFF(idx) \
+ (HINIC_PF_CSR_VF_FLUSH_BASE + (idx) * HINIC_PF_CSR_VF_FLUSH_STRIDE)
+
+#define HINIC_IPSU_CHANNEL_NUM 7
+#define HINIC_IPSU_CHANNEL0_ADDR 0x404
+#define HINIC_IPSU_CHANNEL_OFFSET 0x14
+#define HINIC_IPSU_DIP_OFFSET 13
+#define HINIC_IPSU_SIP_OFFSET 14
+#define HINIC_IPSU_DIP_SIP_MASK \
+ ((0x1 << HINIC_IPSU_SIP_OFFSET) | (0x1 << HINIC_IPSU_DIP_OFFSET))
+
+#define HINIC_IPSURX_VXLAN_DPORT_ADDR 0x6d4
+
+/* For multi-host mgmt
+ * 0x75C0: bit0~3: uP write, host mode is bmgw or normal host
+ * bit4~7: master host ppf write when function initializing
+ * bit8~23: only for slave host PXE
+ * 0x75C4: slave host status
+ * bit0~7: host 0~7 functions status
+ */
+#define HINIC_HOST_MODE_ADDR 0x75C0
+#define HINIC_MULT_HOST_SLAVE_STATUS_ADDR 0x75C4
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h b/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h
new file mode 100644
index 000000000000..d6a23e28e178
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __HINIC_CTX_DEF_H__
+#define __HINIC_CTX_DEF_H__
+
+#define MASKED_SQ_IDX(sq, idx) ((idx) & (sq)->wq->mask)
+
+#define HINIC_CEQE_QN_MASK 0x3FFU
+
+#define HINIC_Q_CTXT_MAX 42
+
+#define HINIC_RQ_CQ_MAX 128
+
+#define MAX_WQE_SIZE(max_sge, wqebb_size) \
+ (((max_sge) <= 2) ? (wqebb_size) : \
+ ((ALIGN(((max_sge) - 2), 4) / 4 + 1) * (wqebb_size)))
+
+/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */
+#define HINIC_CI_Q_ADDR_SIZE 64
+
+#define CI_TABLE_SIZE(num_qps, pg_sz) \
+ (ALIGN((num_qps) * HINIC_CI_Q_ADDR_SIZE, pg_sz))
+
+#define HINIC_CI_VADDR(base_addr, q_id) ((u8 *)(base_addr) + \
+ (q_id) * HINIC_CI_Q_ADDR_SIZE)
+
+#define HINIC_CI_PADDR(base_paddr, q_id) ((base_paddr) + \
+ (q_id) * HINIC_CI_Q_ADDR_SIZE)
+
+#define Q_CTXT_SIZE 48
+#define TSO_LRO_CTXT_SIZE 240
+
+#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
+ (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \
+ + (q_id) * Q_CTXT_SIZE)
+
+#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
+ (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \
+ + (max_sqs) * Q_CTXT_SIZE + (q_id) * Q_CTXT_SIZE)
+
+#define SQ_CTXT_SIZE(num_sqs) ((u16)(sizeof(struct hinic_qp_ctxt_header) \
+ + (num_sqs) * sizeof(struct hinic_sq_ctxt)))
+
+#define RQ_CTXT_SIZE(num_rqs) ((u16)(sizeof(struct hinic_qp_ctxt_header) \
+ + (num_rqs) * sizeof(struct hinic_rq_ctxt)))
+
+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_SHIFT 8
+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13
+#define SQ_CTXT_CEQ_ATTR_EN_SHIFT 23
+#define SQ_CTXT_CEQ_ATTR_ARM_SHIFT 31
+
+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_MASK 0x1FU
+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FFU
+#define SQ_CTXT_CEQ_ATTR_EN_MASK 0x1U
+#define SQ_CTXT_CEQ_ATTR_ARM_MASK 0x1U
+
+#define SQ_CTXT_CEQ_ATTR_SET(val, member) \
+ (((val) & SQ_CTXT_CEQ_ATTR_##member##_MASK) \
+ << SQ_CTXT_CEQ_ATTR_##member##_SHIFT)
+
+#define SQ_CTXT_CI_IDX_SHIFT 11
+#define SQ_CTXT_CI_OWNER_SHIFT 23
+
+#define SQ_CTXT_CI_IDX_MASK 0xFFFU
+#define SQ_CTXT_CI_OWNER_MASK 0x1U
+
+#define SQ_CTXT_CI_SET(val, member) \
+ (((val) & SQ_CTXT_CI_##member##_MASK) \
+ << SQ_CTXT_CI_##member##_SHIFT)
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
+#define SQ_CTXT_WQ_PAGE_PI_SHIFT 20
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU
+#define SQ_CTXT_WQ_PAGE_PI_MASK 0xFFFU
+
+#define SQ_CTXT_WQ_PAGE_SET(val, member) \
+ (((val) & SQ_CTXT_WQ_PAGE_##member##_MASK) \
+ << SQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
+#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14
+#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU
+#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU
+#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0
+#define SQ_CTXT_PREF_CI_SHIFT 20
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU
+#define SQ_CTXT_PREF_CI_MASK 0xFFFU
+
+#define SQ_CTXT_PREF_SET(val, member) \
+ (((val) & SQ_CTXT_PREF_##member##_MASK) \
+ << SQ_CTXT_PREF_##member##_SHIFT)
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU
+
+#define SQ_CTXT_WQ_BLOCK_SET(val, member) \
+ (((val) & SQ_CTXT_WQ_BLOCK_##member##_MASK) \
+ << SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 0
+#define RQ_CTXT_CEQ_ATTR_OWNER_SHIFT 1
+
+#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U
+#define RQ_CTXT_CEQ_ATTR_OWNER_MASK 0x1U
+
+#define RQ_CTXT_CEQ_ATTR_SET(val, member) \
+ (((val) & RQ_CTXT_CEQ_ATTR_##member##_MASK) \
+ << RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
+
+#define RQ_CTXT_PI_IDX_SHIFT 0
+#define RQ_CTXT_PI_INTR_SHIFT 22
+#define RQ_CTXT_PI_CEQ_ARM_SHIFT 31
+
+#define RQ_CTXT_PI_IDX_MASK 0xFFFU
+#define RQ_CTXT_PI_INTR_MASK 0x3FFU
+#define RQ_CTXT_PI_CEQ_ARM_MASK 0x1U
+
+#define RQ_CTXT_PI_SET(val, member) \
+ (((val) & RQ_CTXT_PI_##member##_MASK) << \
+ RQ_CTXT_PI_##member##_SHIFT)
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
+#define RQ_CTXT_WQ_PAGE_CI_SHIFT 20
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU
+#define RQ_CTXT_WQ_PAGE_CI_MASK 0xFFFU
+
+#define RQ_CTXT_WQ_PAGE_SET(val, member) \
+ (((val) & RQ_CTXT_WQ_PAGE_##member##_MASK) << \
+ RQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
+#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14
+#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU
+#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU
+#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0
+#define RQ_CTXT_PREF_CI_SHIFT 20
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU
+#define RQ_CTXT_PREF_CI_MASK 0xFFFU
+
+#define RQ_CTXT_PREF_SET(val, member) \
+ (((val) & RQ_CTXT_PREF_##member##_MASK) << \
+ RQ_CTXT_PREF_##member##_SHIFT)
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU
+
+#define RQ_CTXT_WQ_BLOCK_SET(val, member) \
+ (((val) & RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
+ RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4)
+
+#define WQ_PAGE_PFN_SHIFT 12
+#define WQ_BLOCK_PFN_SHIFT 9
+
+#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT)
+#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
+
+/* send-queue completion flag */
+enum sq_cflag {
+ CFLAG_DATA_PATH = 0,
+};
+
+/* selects whether a qp context message describes SQs or RQs */
+enum hinic_qp_ctxt_type {
+ HINIC_QP_CTXT_TYPE_SQ,
+ HINIC_QP_CTXT_TYPE_RQ,
+};
+
+/* service type related defines: one capability bit per offload service */
+enum cfg_svc_type_en {
+ CFG_SVC_NIC_BIT0 = (1 << 0),
+ CFG_SVC_ROCE_BIT1 = (1 << 1),
+ CFG_SVC_FCOE_BIT2 = (1 << 2),
+ CFG_SVC_TOE_BIT3 = (1 << 3),
+ CFG_SVC_IWARP_BIT4 = (1 << 4),
+ CFG_SVC_FC_BIT5 = (1 << 5),
+
+ CFG_SVC_FIC_BIT6 = (1 << 6),
+ CFG_SVC_OVS_BIT7 = (1 << 7),
+ CFG_SVC_ACL_BIT8 = (1 << 8),
+ CFG_SVC_IOE_BIT9 = (1 << 9),
+ CFG_SVC_HWPT_BIT10 = (1 << 10),
+
+ /* composite masks: flow-table services and RDMA services */
+ CFG_SVC_FT_EN = (CFG_SVC_FCOE_BIT2 | CFG_SVC_TOE_BIT3 |
+ CFG_SVC_FC_BIT5 | CFG_SVC_IOE_BIT9),
+ CFG_SVC_RDMA_EN = (CFG_SVC_ROCE_BIT1 | CFG_SVC_IWARP_BIT4)
+};
+
+#define IS_NIC_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_NIC_BIT0)
+#define IS_ROCE_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_ROCE_BIT1)
+#define IS_FCOE_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FCOE_BIT2)
+#define IS_TOE_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_TOE_BIT3)
+#define IS_IWARP_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_IWARP_BIT4)
+#define IS_FC_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FC_BIT5)
+#define IS_FIC_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FIC_BIT6)
+#define IS_OVS_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_OVS_BIT7)
+#define IS_ACL_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_ACL_BIT8)
+#define IS_IOE_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_IOE_BIT9)
+#define IS_FT_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FT_EN)
+#define IS_RDMA_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_RDMA_EN)
+#define IS_HWPT_TYPE(dev) \
+ ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_HWPT_BIT10)
+
+#endif /* __HINIC_CTX_DEF_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbg.h b/drivers/net/ethernet/huawei/hinic/hinic_dbg.h
new file mode 100644
index 000000000000..415fe989ef33
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dbg.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0*/
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_NIC_DBG_H_
+#define HINIC_NIC_DBG_H_
+
+u16 hinic_dbg_get_qp_num(void *hwdev);
+
+void *hinic_dbg_get_qp_handle(void *hwdev, u16 q_id);
+
+void *hinic_dbg_get_sq_wq_handle(void *hwdev, u16 q_id);
+
+void *hinic_dbg_get_rq_wq_handle(void *hwdev, u16 q_id);
+
+u16 hinic_dbg_get_sq_pi(void *hwdev, u16 q_id);
+
+u16 hinic_dbg_get_rq_hw_pi(void *hwdev, u16 q_id);
+
+u16 hinic_dbg_get_rq_sw_pi(void *hwdev, u16 q_id);
+
+void *hinic_dbg_get_sq_ci_addr(void *hwdev, u16 q_id);
+
+u64 hinic_dbg_get_sq_cla_addr(void *hwdev, u16 q_id);
+
+u64 hinic_dbg_get_rq_cla_addr(void *hwdev, u16 q_id);
+
+int hinic_dbg_get_sq_db_addr(void *hwdev, u16 q_id, u64 **map_addr,
+ u64 *phy_addr, u32 *pg_idx);
+
+u16 hinic_dbg_get_global_qpn(const void *hwdev);
+
+int hinic_dbg_get_sq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt,
+ u8 *wqe, u16 *wqe_size);
+
+int hinic_dbg_get_rq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt,
+ u8 *wqe, u16 *wqe_size);
+
+int hinic_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance,
+ u32 lt_index, u8 *data);
+
+int hinic_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance,
+ u32 lt_index, u8 *data, u16 mask);
+
+int hinic_sm_ctr_rd32(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u32 *value);
+
+int hinic_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u32 *value);
+
+int hinic_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value);
+
+int hinic_sm_ctr_rd64(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u64 *value);
+
+int hinic_sm_ctr_wr64(void *hwdev, u8 node, u8 instance,
+ u32 ctr_id, u64 value);
+
+int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, u32 ctr_id,
+ u64 *value1, u64 *value2);
+
+int hinic_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance, u32 ctr_id,
+ u64 value1, u64 value2);
+
+int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val);
+
+int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val);
+
+int hinic_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val);
+
+int hinic_api_csr_wr64(void *hwdev, u8 dest, u32 addr, u64 val);
+
+int hinic_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size);
+
+u16 hinic_dbg_clear_hw_stats(void *hwdev, u32 *out_size);
+
+void hinic_get_chip_fault_stats(const void *hwdev,
+ u8 *chip_fault_stats, int offset);
+
+int hinic_dbg_get_pf_bw_limit(void *hwdev, u32 *pf_bw_limit);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c
new file mode 100644
index 000000000000..08aa1d7d2709
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/if.h>
+#include <linux/ioctl.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_nic_dev.h"
+#include "hinic_lld.h"
+#include "hinic_dbgtool_knl.h"
+
+/* payload of the HINIC_SELF_CMD_UP2PF_FFM message sent by the uP */
+struct ffm_intr_info {
+ u8 node_id;
+ /* error level of the interrupt source */
+ u8 err_level;
+ /* Classification by interrupt source properties */
+ u16 err_type;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+};
+
+/* upper bound (2KB) for tool cmd/ack buffers and mgmt messages */
+#define DBGTOOL_MSG_MAX_SIZE 2048ULL
+/* self-defined mgmt cmd: uP reports an FFM interrupt record to the PF */
+#define HINIC_SELF_CMD_UP2PF_FFM 0x26
+
+/* per-card state, indexed by the id parsed from the chip name */
+void *g_hinic_card_node_array[MAX_CARD_NUM] = {0};
+void *g_hinic_card_vir_addr[MAX_CARD_NUM] = {0};
+u64 g_hinic_card_phy_addr[MAX_CARD_NUM] = {0};
+/* lock for g_hinic_card_vir_addr */
+struct mutex g_hinic_addr_lock;
+/* card selected by the most recent ioctl; used by hinic_mem_mmap() */
+int g_hinic_card_id;
+
+/* dbgtool character device name, class name, dev path */
+#define CHR_DEV_DBGTOOL "dbgtool_chr_dev"
+#define CLASS_DBGTOOL "dbgtool_class"
+#define DBGTOOL_DEV_PATH "/dev/dbgtool_chr_dev"
+
+/* per-card tool state: ioctl serialization + FFM interrupt records */
+struct dbgtool_k_glb_info {
+ struct semaphore dbgtool_sem;
+ struct ffm_record_info *ffm;
+};
+
+static dev_t dbgtool_dev_id; /* device id */
+static struct cdev dbgtool_chr_dev; /* struct of char device */
+
+/*lint -save -e104 -e808*/
+static struct class *dbgtool_d_class; /* struct of char class */
+/*lint -restore*/
+
+/* the char device is created once and refcounted across cards */
+static int g_dbgtool_init_flag;
+static int g_dbgtool_ref_cnt;
+
+/* open/release/read/write are intentional no-ops: all dbgtool
+ * functionality is reached through unlocked_ioctl and mmap.
+ */
+static int dbgtool_knl_open(struct inode *pnode,
+ struct file *pfile)
+{
+ return 0;
+}
+
+static int dbgtool_knl_release(struct inode *pnode,
+ struct file *pfile)
+{
+ return 0;
+}
+
+static ssize_t dbgtool_knl_read(struct file *pfile,
+ char __user *ubuf,
+ size_t size,
+ loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t dbgtool_knl_write(struct file *pfile,
+ const char __user *ubuf,
+ size_t size,
+ loff_t *ppos)
+{
+ return 0;
+}
+
+/* Accept only physical addresses that back one of the per-card
+ * api-chain regions recorded in g_hinic_card_phy_addr[].
+ */
+static bool is_valid_phy_addr(u64 offset)
+{
+ int idx;
+
+ for (idx = 0; idx < MAX_CARD_NUM; idx++) {
+ if (g_hinic_card_phy_addr[idx] == offset)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * hinic_mem_mmap - map a driver-owned physical region into user space
+ * @filp: file being mapped
+ * @vma: user VMA; vm_pgoff selects the physical region (0 means the
+ * api-chain buffer of the currently selected card)
+ * Return: 0 - success, negative - failure
+ */
+int hinic_mem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vmsize = vma->vm_end - vma->vm_start;
+ phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ phys_addr_t phy_addr;
+
+ /* never map more than the per-card allocation size */
+ if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) {
+ pr_err("Map size = %lu is bigger than alloc\n", vmsize);
+ return -EAGAIN;
+ }
+
+ /* reject addresses that are neither an api-chain buffer nor a BAR */
+ if (offset && !is_valid_phy_addr((u64)offset) &&
+ !hinic_is_valid_bar_addr((u64)offset)) {
+ pr_err("offset is invalid");
+ return -EAGAIN;
+ }
+
+ /* old version of tool set vma->vm_pgoff to 0 */
+ phy_addr = offset ? offset : g_hinic_card_phy_addr[g_hinic_card_id];
+ if (!phy_addr) {
+ pr_err("Card_id = %d physical address is 0\n", g_hinic_card_id);
+ return -EAGAIN;
+ }
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ (phy_addr >> PAGE_SHIFT),
+ vmsize, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * dbgtool_knl_api_cmd_read - used for read operations
+ * @para: the dbgtool parameter
+ * @g_func_handle_array: global function handle
+ * Return: 0 - success, negative - failure
+ */
+static long dbgtool_knl_api_cmd_read(struct dbgtool_param *para,
+ void **g_func_handle_array)
+{
+ long ret = 0;
+ u8 *cmd;
+ u16 size;
+ void *ack;
+ u16 ack_size;
+ u32 pf_id;
+ void *hwdev;
+
+ pf_id = para->param.api_rd.pf_id;
+ if (pf_id >= 16) {
+ pr_err("PF id(0x%x) too big\n", pf_id);
+ return -EFAULT;
+ }
+
+ /* obtaining pf_id chipif pointer */
+ hwdev = g_func_handle_array[pf_id];
+ if (!hwdev) {
+ pr_err("PF id(0x%x) handle null in api cmd read\n", pf_id);
+ return -EFAULT;
+ }
+
+ /* alloc cmd and ack memory, bounded by DBGTOOL_MSG_MAX_SIZE (2KB) */
+ size = para->param.api_rd.size;
+ if (para->param.api_rd.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) {
+ pr_err("Read cmd size invalid or more than 2KB\n");
+ return -EINVAL;
+ }
+ cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ ack_size = para->param.api_rd.ack_size;
+ if (para->param.api_rd.ack_size == 0) {
+ pr_err("Read cmd ack size is 0\n");
+ /* invalid user parameter, not an allocation failure */
+ ret = -EINVAL;
+ goto alloc_ack_mem_fail;
+ }
+
+ ack = kzalloc((unsigned long long)ack_size, GFP_KERNEL);
+ if (!ack) {
+ ret = -ENOMEM;
+ goto alloc_ack_mem_fail;
+ }
+
+ /* cmd content copied from user-mode */
+ if (copy_from_user(cmd, para->param.api_rd.cmd, (unsigned long)size)) {
+ pr_err("Copy cmd from user fail\n");
+ ret = -EFAULT;
+ goto copy_user_cmd_fail;
+ }
+ /* Invoke the api cmd interface read content */
+ ret = hinic_api_cmd_read_ack(hwdev, para->param.api_rd.dest,
+ cmd, size, ack, ack_size);
+ if (ret) {
+ pr_err("Api send single cmd ack fail!\n");
+ goto api_rd_fail;
+ }
+
+ /* Copy the contents of the ack to the user state */
+ if (copy_to_user(para->param.api_rd.ack, ack, ack_size)) {
+ pr_err("Copy ack to user fail\n");
+ ret = -EFAULT;
+ }
+api_rd_fail:
+copy_user_cmd_fail:
+ kfree(ack);
+alloc_ack_mem_fail:
+ kfree(cmd);
+ return ret;
+}
+
+/**
+ * dbgtool_knl_api_cmd_write - used for write operations
+ * @para: the dbgtool parameter
+ * @g_func_handle_array: global function handle
+ * Return: 0 - success, negative - failure
+ */
+static long dbgtool_knl_api_cmd_write(struct dbgtool_param *para,
+ void **g_func_handle_array)
+{
+ long ret = 0;
+ u8 *cmd;
+ u16 size;
+ u32 pf_id;
+ void *hwdev;
+
+ pf_id = para->param.api_wr.pf_id;
+ if (pf_id >= 16) {
+ pr_err("PF id(0x%x) too big\n", pf_id);
+ return -EFAULT;
+ }
+
+ /* obtaining chipif pointer according to pf_id */
+ hwdev = g_func_handle_array[pf_id];
+ if (!hwdev) {
+ pr_err("PF id(0x%x) handle null\n", pf_id);
+ return -EFAULT;
+ }
+
+ /* alloc cmd memory, bounded by DBGTOOL_MSG_MAX_SIZE (2KB) */
+ size = para->param.api_wr.size;
+ if (para->param.api_wr.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) {
+ pr_err("Write cmd size invalid or more than 2KB\n");
+ return -EINVAL;
+ }
+ cmd = kzalloc((unsigned long long)size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ /* cmd content copied from user-mode */
+ if (copy_from_user(cmd, para->param.api_wr.cmd, (unsigned long)size)) {
+ pr_err("Copy cmd from user fail\n");
+ ret = -EFAULT;
+ goto copy_user_cmd_fail;
+ }
+
+ /* api cmd interface is invoked to write the content */
+ ret = hinic_api_cmd_write_nack(hwdev, para->param.api_wr.dest,
+ cmd, size);
+ if (ret)
+ pr_err("Api send single cmd nack fail\n");
+
+copy_user_cmd_fail:
+ kfree(cmd);
+ return ret;
+}
+
+/**
+ * hinic_chipif_get_all_pf_dev_info - collect PCI location, BAR0 size and
+ * the api-chain physical base for up to 16 PFs of one card
+ * @dev_info: output array, must hold at least 16 entries
+ * @card_idx: index into the per-card physical-address table
+ * @g_func_handle_array: per-PF hwdev handles; NULL slots yield zeroed entries
+ */
+void hinic_chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_idx,
+ void **g_func_handle_array)
+{
+ u32 func_idx;
+ struct hinic_hwdev *hwdev;
+
+ if (!dev_info) {
+ pr_err("Params error!\n");
+ return;
+ }
+
+ /* pf at most 16 */
+ for (func_idx = 0; func_idx < 16; func_idx++) {
+ hwdev = (struct hinic_hwdev *)g_func_handle_array[func_idx];
+
+ dev_info[func_idx].phy_addr = g_hinic_card_phy_addr[card_idx];
+
+ if (!hwdev) {
+ dev_info[func_idx].bar0_size = 0;
+ dev_info[func_idx].bus = 0;
+ dev_info[func_idx].slot = 0;
+ dev_info[func_idx].func = 0;
+ } else {
+ dev_info[func_idx].bar0_size =
+ pci_resource_len
+ (((struct pci_dev *)hwdev->pcidev_hdl), 0);
+ dev_info[func_idx].bus =
+ ((struct pci_dev *)
+ hwdev->pcidev_hdl)->bus->number;
+ dev_info[func_idx].slot =
+ PCI_SLOT(((struct pci_dev *)hwdev->pcidev_hdl)
+ ->devfn);
+ dev_info[func_idx].func =
+ PCI_FUNC(((struct pci_dev *)hwdev->pcidev_hdl)
+ ->devfn);
+ }
+ }
+}
+
+/**
+ * dbgtool_knl_pf_dev_info_get - Obtain the pf sdk_info
+ * @para: the dbgtool parameter
+ * @g_func_handle_array: global function handle
+ * Return: 0 - success, negative - failure
+ */
+static long dbgtool_knl_pf_dev_info_get(struct dbgtool_param *para,
+ void **g_func_handle_array)
+{
+ struct pf_dev_info dev_info[16] = { {0} };
+ unsigned char *tmp;
+ int i;
+
+ /* lazily allocate the per-card api-chain buffer on first query */
+ mutex_lock(&g_hinic_addr_lock);
+ if (!g_hinic_card_vir_addr[g_hinic_card_id]) {
+ g_hinic_card_vir_addr[g_hinic_card_id] =
+ (void *)__get_free_pages(GFP_KERNEL,
+ DBGTOOL_PAGE_ORDER);
+ if (!g_hinic_card_vir_addr[g_hinic_card_id]) {
+ pr_err("Alloc dbgtool api chain fail!\n");
+ mutex_unlock(&g_hinic_addr_lock);
+ return -EFAULT;
+ }
+
+ memset(g_hinic_card_vir_addr[g_hinic_card_id], 0,
+ PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER));
+
+ g_hinic_card_phy_addr[g_hinic_card_id] =
+ virt_to_phys(g_hinic_card_vir_addr[g_hinic_card_id]);
+ if (!g_hinic_card_phy_addr[g_hinic_card_id]) {
+ pr_err("phy addr for card %d is 0\n", g_hinic_card_id);
+ free_pages((unsigned long)g_hinic_card_vir_addr[g_hinic_card_id],
+ DBGTOOL_PAGE_ORDER);
+ g_hinic_card_vir_addr[g_hinic_card_id] = NULL;
+ mutex_unlock(&g_hinic_addr_lock);
+ return -EFAULT;
+ }
+
+ /* reserve the pages so they can be handed out via hinic_mem_mmap */
+ tmp = g_hinic_card_vir_addr[g_hinic_card_id];
+ for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
+ SetPageReserved(virt_to_page(tmp));
+ tmp += PAGE_SIZE;
+ }
+ }
+ mutex_unlock(&g_hinic_addr_lock);
+
+ hinic_chipif_get_all_pf_dev_info(dev_info, g_hinic_card_id, g_func_handle_array);
+
+ /* Copy the dev_info to user mode */
+ if (copy_to_user(para->param.dev_info, dev_info,
+ (unsigned int)sizeof(dev_info))) {
+ pr_err("Copy dev_info to user fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * dbgtool_knl_ffm_info_rd - Read ffm information
+ * @para: the dbgtool parameter
+ * @dbgtool_info: the dbgtool info
+ *
+ * Reading does not consume the records; use DBGTOOL_CMD_FFM_CLR to reset.
+ * Return: 0 - success, negative - failure
+ */
+static long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para,
+ struct dbgtool_k_glb_info *dbgtool_info)
+{
+ /* Copy the ffm_info to user mode */
+ if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm,
+ (unsigned int)sizeof(struct ffm_record_info))) {
+ pr_err("Copy ffm_info to user fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * dbgtool_knl_ffm_info_clr - Clear FFM information
+ * @para: unused
+ * @dbgtool_info: the dbgtool info
+ *
+ * Only resets the record count; stale entries are overwritten as new
+ * records arrive.
+ */
+static void dbgtool_knl_ffm_info_clr(struct dbgtool_param *para,
+ struct dbgtool_k_glb_info *dbgtool_info)
+{
+ dbgtool_info->ffm->ffm_num = 0;
+}
+
+/**
+ * dbgtool_knl_msg_to_up - After receiving dbgtool command sends a message to uP
+ * @para: the dbgtool parameter
+ * @g_func_handle_array: global function handle
+ * Return: 0 - success, negative - failure
+ */
+static long dbgtool_knl_msg_to_up(struct dbgtool_param *para,
+ void **g_func_handle_array)
+{
+ long ret = 0;
+ void *buf_in;
+ void *buf_out;
+ u16 out_size;
+ u8 pf_id;
+
+ if (para->param.msg2up.in_size > DBGTOOL_MSG_MAX_SIZE) {
+ pr_err("User data(%d) more than 2KB\n",
+ para->param.msg2up.in_size);
+ return -EFAULT;
+ }
+
+ pf_id = para->param.msg2up.pf_id;
+ /* pf at most 16 */
+ if (pf_id >= 16) {
+ pr_err("PF id(0x%x) too big in message to mgmt\n", pf_id);
+ return -EFAULT;
+ }
+
+ if (!g_func_handle_array[pf_id]) {
+ pr_err("PF id(0x%x) handle null in message to mgmt\n", pf_id);
+ return -EFAULT;
+ }
+
+ /* alloc buf_in and buf_out memory, apply for 2K */
+ buf_in = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!buf_in)
+ return -ENOMEM;
+
+ /* fix: gfp mask was 0 (no allocation flags); use GFP_KERNEL like buf_in */
+ buf_out = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!buf_out) {
+ ret = -ENOMEM;
+ goto alloc_buf_out_mem_fail;
+ }
+
+ /* copy buf_in from the user state */
+ if (copy_from_user(buf_in, para->param.msg2up.buf_in,
+ (unsigned long)para->param.msg2up.in_size)) {
+ pr_err("Copy buf_in from user fail\n");
+ ret = -EFAULT;
+ goto copy_user_buf_in_fail;
+ }
+
+ out_size = DBGTOOL_MSG_MAX_SIZE;
+ /* Invoke the pf2up communication interface */
+ ret = hinic_msg_to_mgmt_sync(g_func_handle_array[pf_id],
+ para->param.msg2up.mod,
+ para->param.msg2up.cmd,
+ buf_in,
+ para->param.msg2up.in_size,
+ buf_out,
+ &out_size,
+ 0);
+ if (ret)
+ goto msg_2_up_fail;
+
+ /* Copy the out_size and buf_out content to user mode */
+ if (copy_to_user(para->param.msg2up.out_size, &out_size,
+ (unsigned int)sizeof(out_size))) {
+ pr_err("Copy out_size to user fail\n");
+ ret = -EFAULT;
+ goto copy_out_size_fail;
+ }
+
+ if (copy_to_user(para->param.msg2up.buf_out, buf_out, out_size)) {
+ pr_err("Copy buf_out to user fail\n");
+ ret = -EFAULT;
+ }
+
+copy_out_size_fail:
+msg_2_up_fail:
+copy_user_buf_in_fail:
+ kfree(buf_out);
+alloc_buf_out_mem_fail:
+ kfree(buf_in);
+ return ret;
+}
+
+/**
+ * hinic_dbgtool_knl_free_mem - release the reserved api-chain pages of a card
+ * @id: card index
+ * Return: always 0
+ */
+long hinic_dbgtool_knl_free_mem(int id)
+{
+ unsigned char *tmp;
+ int i;
+
+ mutex_lock(&g_hinic_addr_lock);
+
+ if (!g_hinic_card_vir_addr[id]) {
+ mutex_unlock(&g_hinic_addr_lock);
+ return 0;
+ }
+
+ /* undo the SetPageReserved done before the pages were mmap-ed */
+ tmp = g_hinic_card_vir_addr[id];
+ for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
+ ClearPageReserved(virt_to_page(tmp));
+ tmp += PAGE_SIZE;
+ }
+
+ free_pages((unsigned long)g_hinic_card_vir_addr[id], DBGTOOL_PAGE_ORDER);
+ g_hinic_card_vir_addr[id] = NULL;
+ g_hinic_card_phy_addr[id] = 0;
+
+ mutex_unlock(&g_hinic_addr_lock);
+
+ return 0;
+}
+
+/*lint -save -e771 -e794*/
+
+/**
+ * dbgtool_knl_unlocked_ioctl - dbgtool ioctl entry
+ * @pfile: the pointer to file
+ * @cmd: the command type
+ * @arg: user pointer to a struct dbgtool_param
+ * Return: 0 - success, negative - failure
+ */
+static long dbgtool_knl_unlocked_ioctl(struct file *pfile,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+ unsigned int real_cmd;
+ struct dbgtool_param param;
+ struct dbgtool_k_glb_info *dbgtool_info;
+ struct card_node *card_info = NULL;
+ int i;
+
+ (void)memset(&param, 0, sizeof(param));
+
+ if (copy_from_user(&param, (void *)arg, sizeof(param))) {
+ pr_err("Copy param from user fail\n");
+ return -EFAULT;
+ }
+
+ /* force NUL-termination before the name lookup below */
+ param.chip_name[IFNAMSIZ - 1] = '\0';
+ for (i = 0; i < MAX_CARD_NUM; i++) {
+ card_info = (struct card_node *)g_hinic_card_node_array[i];
+ if (!card_info)
+ continue;
+ if (!strncmp(param.chip_name, card_info->chip_name, IFNAMSIZ))
+ break;
+ }
+
+ if (i == MAX_CARD_NUM || !card_info) {
+ pr_err("Can't find this card %s\n", param.chip_name);
+ return -EFAULT;
+ }
+
+ /* remember the selected card for subsequent mmap calls */
+ g_hinic_card_id = i;
+
+ dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info;
+
+ /* serialize all tool operations on this card */
+ down(&dbgtool_info->dbgtool_sem);
+
+ real_cmd = _IOC_NR(cmd);
+
+ switch (real_cmd) {
+ case DBGTOOL_CMD_API_RD:
+ ret = dbgtool_knl_api_cmd_read(&param,
+ card_info->func_handle_array);
+ break;
+ case DBGTOOL_CMD_API_WR:
+ ret = dbgtool_knl_api_cmd_write(&param,
+ card_info->func_handle_array);
+ break;
+ case DBGTOOL_CMD_FFM_RD:
+ ret = dbgtool_knl_ffm_info_rd(&param, dbgtool_info);
+ break;
+ case DBGTOOL_CMD_FFM_CLR:
+ dbgtool_knl_ffm_info_clr(&param, dbgtool_info);
+ break;
+ case DBGTOOL_CMD_PF_DEV_INFO_GET:
+ ret = dbgtool_knl_pf_dev_info_get(&param,
+ card_info->func_handle_array);
+ break;
+ case DBGTOOL_CMD_MSG_2_UP:
+ ret = dbgtool_knl_msg_to_up(&param,
+ card_info->func_handle_array);
+ break;
+ case DBGTOOL_CMD_FREE_MEM:
+ ret = hinic_dbgtool_knl_free_mem(i);
+ break;
+ default:
+ pr_err("Dbgtool cmd(x%x) not support now\n", real_cmd);
+ ret = -EFAULT;
+ }
+
+ up(&dbgtool_info->dbgtool_sem);
+ return ret;
+}
+
+/**
+ * ffm_intr_msg_record - FFM interruption records sent up
+ * @handle: the function handle
+ * @buf_in: the pointer to input buffer
+ * @in_size: size of @buf_in in bytes
+ * @buf_out: the pointer to output buffer (unused)
+ * @out_size: output size pointer (unused)
+ */
+static void ffm_intr_msg_record(void *handle, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct dbgtool_k_glb_info *dbgtool_info;
+ struct ffm_intr_info *intr;
+ u32 ffm_idx;
+ struct tm tm;
+ struct card_node *card_info = NULL;
+ struct hinic_hwdev *hwdev = handle;
+ bool flag = false;
+ int i, j;
+
+ /* locate the card this function handle belongs to */
+ for (i = 0; i < MAX_CARD_NUM; i++) {
+ card_info = (struct card_node *)g_hinic_card_node_array[i];
+ if (!card_info)
+ continue;
+
+ for (j = 0; j < MAX_FUNCTION_NUM; j++) {
+ if (handle == card_info->func_handle_array[j]) {
+ flag = true;
+ break;
+ }
+ }
+
+ if (flag)
+ break;
+ }
+
+ if (i == MAX_CARD_NUM || !card_info) {
+ pr_err("Id(%d) can't find this card\n", i);
+ return;
+ }
+
+ dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info;
+ if (!dbgtool_info) {
+ pr_err("Dbgtool info is null\n");
+ return;
+ }
+
+ /* NOTE(review): in_size is not checked against sizeof(*intr) before
+ * the cast - confirm the mgmt channel guarantees the payload size
+ */
+ intr = (struct ffm_intr_info *)buf_in;
+
+ if (!dbgtool_info->ffm)
+ return;
+
+ /* records beyond FFM_RECORD_NUM_MAX are silently dropped */
+ ffm_idx = dbgtool_info->ffm->ffm_num;
+ if (ffm_idx < FFM_RECORD_NUM_MAX) {
+ nic_info(hwdev->dev_hdl, "%s: recv intr, ffm_idx: %d\n",
+ __func__, ffm_idx);
+
+ dbgtool_info->ffm->ffm[ffm_idx].node_id = intr->node_id;
+ dbgtool_info->ffm->ffm[ffm_idx].err_level = intr->err_level;
+ dbgtool_info->ffm->ffm[ffm_idx].err_type = intr->err_type;
+ dbgtool_info->ffm->ffm[ffm_idx].err_csr_addr =
+ intr->err_csr_addr;
+ dbgtool_info->ffm->ffm[ffm_idx].err_csr_value =
+ intr->err_csr_value;
+
+ /* Calculate the time in date value to tm */
+ time64_to_tm(ktime_to_ms(ktime_get_real()) / MSEC_PER_SEC, 0, &tm);
+
+ /* tm_year starts from 1900; 0->1900, 1->1901, and so on */
+ dbgtool_info->ffm->ffm[ffm_idx].year =
+ (u16)(tm.tm_year + 1900);
+ /* tm_mon starts from 0, 0 indicates January, and so on */
+ dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)tm.tm_mon + 1;
+ dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)tm.tm_mday;
+ dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)tm.tm_hour;
+ dbgtool_info->ffm->ffm[ffm_idx].min = (u8)tm.tm_min;
+ dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)tm.tm_sec;
+
+ dbgtool_info->ffm->ffm_num++;
+ }
+}
+
+/*lint -restore*/
+
+/*lint -save -e785 -e438*/
+/* char-device entry points; ioctl and mmap carry all tool functionality,
+ * the remaining handlers are no-op stubs
+ */
+static const struct file_operations dbgtool_file_operations = {
+ .owner = THIS_MODULE,
+ .open = dbgtool_knl_open,
+ .release = dbgtool_knl_release,
+ .read = dbgtool_knl_read,
+ .write = dbgtool_knl_write,
+ .unlocked_ioctl = dbgtool_knl_unlocked_ioctl,
+ .mmap = hinic_mem_mmap,
+};
+
+/**
+ * hinic_dbgtool_knl_init - dbgtool character device init
+ * @vhwdev: the pointer to hardware device
+ * @chip_node: the pointer to card node
+ *
+ * The char device and class are created only once; subsequent PFs/cards
+ * just register their handle and bump the reference count.
+ * Return: 0 - success, negative - failure
+ */
+int hinic_dbgtool_knl_init(void *vhwdev, void *chip_node)
+{
+ int ret = 0;
+ int id;
+ struct dbgtool_k_glb_info *dbgtool_info;
+ struct device *pdevice;
+ struct card_node *chip_info = (struct card_node *)chip_node;
+ struct hinic_hwdev *hwdev = vhwdev;
+
+ /* the tool is PF-only */
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ ret = sysfs_create_file(&((struct device *)(hwdev->dev_hdl))->kobj,
+ &chip_info->dbgtool_attr_file);
+ if (ret) {
+ pr_err("Failed to sysfs create file\n");
+ return ret;
+ }
+
+ chip_info->func_handle_array[hinic_global_func_id(hwdev)] = hwdev;
+
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev, HINIC_SELF_CMD_UP2PF_FFM,
+ ffm_intr_msg_record);
+
+ /* later PFs of an already-initialized card only bump the count */
+ if (chip_info->dbgtool_info) {
+ chip_info->func_num++;
+ return 0;
+ }
+
+ dbgtool_info = (struct dbgtool_k_glb_info *)
+ kzalloc(sizeof(struct dbgtool_k_glb_info), GFP_KERNEL);
+ if (!dbgtool_info) {
+ pr_err("Failed to allocate dbgtool_info\n");
+ ret = -EFAULT;
+ goto dbgtool_info_fail;
+ }
+ chip_info->dbgtool_info = dbgtool_info;
+
+ /* FFM init */
+ dbgtool_info->ffm = (struct ffm_record_info *)
+ kzalloc(sizeof(struct ffm_record_info),
+ GFP_KERNEL);
+ if (!dbgtool_info->ffm) {
+ pr_err("Failed to allocate cell contexts for a chain\n");
+ ret = -EFAULT;
+ goto dbgtool_info_ffm_fail;
+ }
+
+ sema_init(&dbgtool_info->dbgtool_sem, 1);
+
+ /* card index parsed from the "hinic<N>" chip name */
+ ret = sscanf(chip_info->chip_name, HINIC_CHIP_NAME "%d", &id);
+ if (ret <= 0) {
+ pr_err("Failed to get hinic id\n");
+ goto sscanf_chdev_fail;
+ }
+
+ g_hinic_card_node_array[id] = chip_info;
+ chip_info->func_num++;
+
+ if (g_dbgtool_init_flag) {
+ g_dbgtool_ref_cnt++;
+ /* already initialized */
+ return 0;
+ }
+
+ /*alloc device id*/
+ ret = alloc_chrdev_region(&(dbgtool_dev_id), 0, 1, CHR_DEV_DBGTOOL);
+ if (ret) {
+ pr_err("Alloc dbgtool chrdev region fail, ret=0x%x\n", ret);
+ goto alloc_chdev_fail;
+ }
+
+ /*init device*/
+ cdev_init(&(dbgtool_chr_dev), &dbgtool_file_operations);
+
+ /*add device*/
+ ret = cdev_add(&(dbgtool_chr_dev), dbgtool_dev_id, 1);
+ if (ret) {
+ pr_err("Add dgbtool dev fail, ret=0x%x\n", ret);
+ goto cdev_add_fail;
+ }
+
+ /*lint -save -e160*/
+ dbgtool_d_class = class_create(CLASS_DBGTOOL);
+ /*lint -restore*/
+ if (IS_ERR(dbgtool_d_class)) {
+ pr_err("Create dgbtool class fail\n");
+ ret = -EFAULT;
+ goto cls_create_fail;
+ }
+
+ /* Export device information to user space
+ * (/sys/class/class name/device name)
+ */
+ pdevice = device_create(dbgtool_d_class, NULL,
+ dbgtool_dev_id, NULL, CHR_DEV_DBGTOOL);
+ if (IS_ERR(pdevice)) {
+ pr_err("Create dgbtool device fail\n");
+ ret = -EFAULT;
+ goto dev_create_fail;
+ }
+ g_dbgtool_init_flag = 1;
+ g_dbgtool_ref_cnt = 1;
+ mutex_init(&g_hinic_addr_lock);
+
+ return 0;
+
+dev_create_fail:
+ class_destroy(dbgtool_d_class);
+cls_create_fail:
+ cdev_del(&(dbgtool_chr_dev));
+cdev_add_fail:
+ unregister_chrdev_region(dbgtool_dev_id, 1);
+alloc_chdev_fail:
+ g_hinic_card_node_array[id] = NULL;
+sscanf_chdev_fail:
+ kfree(dbgtool_info->ffm);
+dbgtool_info_ffm_fail:
+ kfree(dbgtool_info);
+ dbgtool_info = NULL;
+ chip_info->dbgtool_info = NULL;
+dbgtool_info_fail:
+ /* NOTE(review): registered via ..._recv_mgmt_self_cmd_reg but undone
+ * via ..._recv_up_self_cmd_unreg - presumably paired APIs; verify
+ */
+ hinic_comm_recv_up_self_cmd_unreg(hwdev, HINIC_SELF_CMD_UP2PF_FFM);
+ chip_info->func_handle_array[hinic_global_func_id(hwdev)] = NULL;
+ sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj,
+ &chip_info->dbgtool_attr_file);
+ return ret;
+}
+
+/**
+ * hinic_dbgtool_knl_deinit - dbgtool character device deinit
+ * @vhwdev: the pointer to hardware device
+ * @chip_node: the pointer to card node
+ *
+ * Per-card state is torn down when the last PF of the card leaves; the
+ * shared char device is destroyed when the global refcount drops to zero.
+ */
+void hinic_dbgtool_knl_deinit(void *vhwdev, void *chip_node)
+{
+ struct dbgtool_k_glb_info *dbgtool_info;
+ struct card_node *chip_info = (struct card_node *)chip_node;
+ int id;
+ int err;
+ struct hinic_hwdev *hwdev = vhwdev;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return;
+
+ hinic_comm_recv_up_self_cmd_unreg(hwdev, HINIC_SELF_CMD_UP2PF_FFM);
+
+ chip_info->func_handle_array[hinic_global_func_id(hwdev)] = NULL;
+
+ sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj,
+ &chip_info->dbgtool_attr_file);
+
+ /* other PFs of this card are still registered */
+ chip_info->func_num--;
+ if (chip_info->func_num)
+ return;
+
+ err = sscanf(chip_info->chip_name, HINIC_CHIP_NAME "%d", &id);
+ if (err <= 0)
+ pr_err("Failed to get hinic id\n");
+
+ g_hinic_card_node_array[id] = NULL;
+
+ dbgtool_info = chip_info->dbgtool_info;
+ /* FFM deinit */
+ kfree(dbgtool_info->ffm);
+ dbgtool_info->ffm = NULL;
+
+ kfree(dbgtool_info);
+ chip_info->dbgtool_info = NULL;
+
+ (void)hinic_dbgtool_knl_free_mem(id);
+
+ if (g_dbgtool_init_flag) {
+ if ((--g_dbgtool_ref_cnt))
+ return;
+ }
+
+ if (!dbgtool_d_class)
+ return;
+
+ device_destroy(dbgtool_d_class, dbgtool_dev_id);
+ class_destroy(dbgtool_d_class);
+ dbgtool_d_class = NULL;
+
+ cdev_del(&(dbgtool_chr_dev));
+ unregister_chrdev_region(dbgtool_dev_id, 1);
+
+ g_dbgtool_init_flag = 0;
+}
+
+/*lint -restore*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h
new file mode 100644
index 000000000000..d345f75dc97b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0*/
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __DBGTOOL_KNL_H__
+#define __DBGTOOL_KNL_H__
+
+/* ioctl magic number for the dbgtool char device */
+#define DBG_TOOL_MAGIC 'w'
+
+/* dbgtool command type */
+/* You can add the required dbgtool through these commands
+ * can invoke all X86 kernel mode driver interface
+ */
+enum dbgtool_cmd {
+ DBGTOOL_CMD_API_RD = 0,
+ DBGTOOL_CMD_API_WR,
+
+ DBGTOOL_CMD_FFM_RD,
+ DBGTOOL_CMD_FFM_CLR,
+
+ DBGTOOL_CMD_PF_DEV_INFO_GET,
+
+ DBGTOOL_CMD_MSG_2_UP,
+
+ DBGTOOL_CMD_FREE_MEM,
+ DBGTOOL_CMD_NUM
+};
+
+/* api cmd read request; cmd and ack point into user space (copied with
+ * copy_from_user/copy_to_user on the kernel side)
+ */
+struct api_cmd_rd {
+ u32 pf_id;
+ u8 dest;
+ u8 *cmd;
+ u16 size;
+ void *ack;
+ u16 ack_size;
+};
+
+/* api cmd write request; cmd points into user space */
+struct api_cmd_wr {
+ u32 pf_id;
+ u8 dest;
+ u8 *cmd;
+ u16 size;
+};
+
+/* PCI location, BAR0 size and api-chain physical base of one PF */
+struct pf_dev_info {
+ u64 bar0_size;
+ u8 bus;
+ u8 slot;
+ u8 func;
+ u64 phy_addr;
+};
+
+/* Interrupt at most records, interrupt will be recorded in the FFM */
+#define FFM_RECORD_NUM_MAX 64
+
+/* one FFM interrupt record plus the wall-clock time it was received */
+struct ffm_intr_tm_info {
+ u8 node_id;
+ /* error level of the interrupt source */
+ u8 err_level;
+ /* Classification by interrupt source properties */
+ u16 err_type;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+
+ u8 sec; /* second */
+ u8 min; /* minute */
+ u8 hour; /* hour */
+ u8 mday; /* day */
+ u8 mon; /* month */
+ u16 year; /* year */
+};
+
+/* ffm_num counts valid entries in ffm[], capped at FFM_RECORD_NUM_MAX */
+struct ffm_record_info {
+ u32 ffm_num;
+ struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX];
+};
+
+/* mgmt passthrough request; buf_in, buf_out and out_size are user-space
+ * pointers
+ */
+struct msg_2_up {
+ u8 pf_id; /* which pf sends messages to the up */
+ u8 mod;
+ u8 cmd;
+ void *buf_in;
+ u16 in_size;
+ void *buf_out;
+ u16 *out_size;
+};
+
+/* ioctl argument: command payload union plus the "hinic<N>" chip name */
+struct dbgtool_param {
+ union {
+ struct api_cmd_rd api_rd;
+ struct api_cmd_wr api_wr;
+ struct pf_dev_info *dev_info;
+ struct ffm_record_info *ffm_rd;
+ struct msg_2_up msg2up;
+ } param;
+ char chip_name[16]; /* compared over IFNAMSIZ (16) bytes in the ioctl */
+};
+
+#define MAX_CARD_NUM 64
+/* 2^10 pages (4MB with 4KB pages) reserved per card for the api chain */
+#define DBGTOOL_PAGE_ORDER 10
+
+int hinic_dbgtool_knl_init(void *vhwdev, void *chip_node);
+void hinic_dbgtool_knl_deinit(void *vhwdev, void *chip_node);
+int hinic_mem_mmap(struct file *filp, struct vm_area_struct *vma);
+void hinic_chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_id,
+ void **g_func_handle_array);
+long hinic_dbgtool_knl_free_mem(int id);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dcb.c b/drivers/net/ethernet/huawei/hinic/hinic_dcb.c
new file mode 100644
index 000000000000..0a6306ec8f6f
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dcb.c
@@ -0,0 +1,1796 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_lld.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic_dev.h"
+#include "hinic_dcb.h"
+
+#define DCB_HW_CFG_CHG 0
+#define DCB_HW_CFG_NO_CHG 1
+#define DCB_HW_CFG_ERR 2
+
+#define DCB_CFG_CHG_PG_TX 0x1
+#define DCB_CFG_CHG_PG_RX 0x2
+#define DCB_CFG_CHG_PFC 0x4
+#define DCB_CFG_CHG_UP_COS 0x8
+
+/* hinic_dcb_get_tc - find the TC that serves user priority @up
+ * @dcb_cfg: DCB configuration to search
+ * @dir: HINIC_DCB_CFG_TX or HINIC_DCB_CFG_RX
+ * @up: user priority (bit index into up_map)
+ *
+ * Scans TCs from the highest configured one downwards and returns the
+ * first whose up_map contains @up; returns 0 when no TC matches or when
+ * no TCs are configured.
+ */
+u8 hinic_dcb_get_tc(struct hinic_dcb_config *dcb_cfg, int dir, u8 up)
+{
+	struct hinic_tc_cfg *tc_cfg = &dcb_cfg->tc_cfg[0];
+	u8 num_tcs = dcb_cfg->pg_tcs;
+	u8 tc;
+
+	if (num_tcs == 0)
+		return 0;
+
+	for (tc = num_tcs - 1; tc > 0; tc--) {
+		if (tc_cfg[tc].path[dir].up_map & BIT(up))
+			break;
+	}
+
+	return tc;
+}
+
+#define UP_MAPPING(prio) ((u8)(1U << ((HINIC_DCB_UP_MAX - 1) - (prio))))
+
+/* hinic_dcb_config_init - reset @dcb_cfg to the driver defaults
+ * @nic_dev: owning nic device (unused here beyond context)
+ * @dcb_cfg: configuration to (re)initialize
+ *
+ * All pg_tcs TCs are placed in PG0 with 100% bandwidth and PFC off;
+ * priority i maps to TC i (up_map uses the reversed-bit UP_MAPPING
+ * encoding).  Priorities beyond pg_tcs are folded into the last
+ * initialized TC via the second loop, which relies on @tc still
+ * pointing at that TC after the first loop ends.
+ */
+void hinic_dcb_config_init(struct hinic_nic_dev *nic_dev,
+			   struct hinic_dcb_config *dcb_cfg)
+{
+	struct hinic_tc_cfg *tc;
+	int i;
+
+	memset(dcb_cfg->tc_cfg, 0, sizeof(dcb_cfg->tc_cfg));
+	tc = &dcb_cfg->tc_cfg[0];
+	/* All TC mapping to PG0 */
+	for (i = 0; i < dcb_cfg->pg_tcs; i++) {
+		tc = &dcb_cfg->tc_cfg[i];
+		tc->path[HINIC_DCB_CFG_TX].pg_id = 0;
+		tc->path[HINIC_DCB_CFG_TX].bw_pct = 100;
+		tc->path[HINIC_DCB_CFG_TX].up_map = UP_MAPPING(i);
+		tc->path[HINIC_DCB_CFG_RX].pg_id = 0;
+		tc->path[HINIC_DCB_CFG_RX].bw_pct = 100;
+		tc->path[HINIC_DCB_CFG_RX].up_map = UP_MAPPING(i);
+
+		tc->pfc_en = false;
+	}
+
+	/* remaining priorities all land on the last TC touched above */
+	for (; i < HINIC_DCB_UP_MAX; i++) {
+		tc->path[HINIC_DCB_CFG_TX].up_map |= UP_MAPPING(i);
+		tc->path[HINIC_DCB_CFG_RX].up_map |= UP_MAPPING(i);
+	}
+
+	memset(dcb_cfg->bw_pct, 0, sizeof(dcb_cfg->bw_pct));
+	/* Use PG0 in default, PG0's bw is 100% */
+	dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][0] = 100;
+	dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][0] = 100;
+	dcb_cfg->pfc_state = false;
+}
+
+/* hinic_init_ieee_settings - build the cached IEEE ETS/PFC state from
+ * the CEE-style dcb_cfg.
+ * @nic_dev: device whose hinic_ieee_ets, hinic_ieee_ets_default and
+ * hinic_ieee_pfc are (re)built from nic_dev->dcb_cfg.
+ */
+void hinic_init_ieee_settings(struct hinic_nic_dev *nic_dev)
+{
+	struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+	struct ieee_ets *ets = &nic_dev->hinic_ieee_ets_default;
+	struct ieee_pfc *pfc = &nic_dev->hinic_ieee_pfc;
+	struct hinic_tc_attr *tc_attr;
+	u8 i;
+
+	memset(ets, 0x0, sizeof(struct ieee_ets));
+	memset(&nic_dev->hinic_ieee_ets, 0x0, sizeof(struct ieee_ets));
+	ets->ets_cap = dcb_cfg->pg_tcs;
+	for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+		/* nonzero prio_type means strict priority (see __set_hw_ets) */
+		tc_attr = &dcb_cfg->tc_cfg[i].path[HINIC_DCB_CFG_TX];
+		ets->tc_tsa[i] = tc_attr->prio_type ?
+			IEEE8021Q_TSA_STRICT : IEEE8021Q_TSA_ETS;
+		ets->tc_tx_bw[i] = nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i];
+		ets->tc_rx_bw[i] = nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_RX][i];
+		ets->prio_tc[i] = hinic_dcb_get_tc(dcb_cfg,
+						   HINIC_DCB_CFG_TX, i);
+	}
+	/* active IEEE ETS starts out identical to the default snapshot */
+	memcpy(&nic_dev->hinic_ieee_ets, ets, sizeof(struct ieee_ets));
+
+	memset(pfc, 0x0, sizeof(struct ieee_pfc));
+	pfc->pfc_cap = dcb_cfg->pfc_tcs;
+	for (i = 0; i < dcb_cfg->pfc_tcs; i++) {
+		if (dcb_cfg->tc_cfg[i].pfc_en)
+			pfc->pfc_en |= (u8)BIT(i);
+	}
+}
+
+/* hinic_set_up_cos_map - validate and install a cos->up mapping
+ * @nic_dev: nic device
+ * @num_cos: number of entries in @cos_up
+ * @cos_up: cos_up[i] is the user priority mapped to cos i; entries must
+ *	be strictly descending and each below HINIC_DCB_UP_MAX
+ *
+ * Builds the reverse up->cos table: unmapped priorities and the last
+ * entry get default_cos_id.  On success the SQ cos mapping is pushed to
+ * hardware via hinic_sq_cos_mapping().
+ *
+ * Return: 0 on success, -EFAULT/-EINVAL on invalid mapping, or the
+ * error from hinic_sq_cos_mapping().
+ */
+static int hinic_set_up_cos_map(struct hinic_nic_dev *nic_dev,
+				u8 num_cos, u8 *cos_up)
+{
+	u8 up_valid_bitmap, up_cos[HINIC_DCB_UP_MAX] = {0};
+	u8 i;
+
+	up_valid_bitmap = 0;
+	for (i = 0; i < num_cos; i++) {
+		if (cos_up[i] >= HINIC_DCB_UP_MAX) {
+			hinic_info(nic_dev, drv, "Invalid up %d mapping to cos %d\n",
+				   cos_up[i], i);
+			return -EFAULT;
+		}
+
+		if (i > 0 && cos_up[i] >= cos_up[i - 1]) {
+			hinic_info(nic_dev, drv,
+				   "Invalid priority order, should be descending cos[%d]=%d, cos[%d]=%d\n",
+				   i, cos_up[i], i - 1, cos_up[i - 1]);
+			return -EINVAL;
+		}
+
+		up_valid_bitmap |= (u8)BIT(cos_up[i]);
+		if (i == (num_cos - 1))
+			up_cos[cos_up[i]] = nic_dev->default_cos_id;
+		else
+			up_cos[cos_up[i]] = i; /* reverse up and cos */
+	}
+
+	/* priorities not covered by @cos_up fall back to the default cos */
+	for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+		if (up_valid_bitmap & (u8)BIT(i))
+			continue;
+
+		up_cos[i] = nic_dev->default_cos_id;
+	}
+
+	nic_dev->up_valid_bitmap = up_valid_bitmap;
+	memcpy(nic_dev->up_cos, up_cos, sizeof(up_cos));
+
+	return hinic_sq_cos_mapping(nic_dev->netdev);
+}
+
+/* hinic_init_up_cos_map - install the initial up->cos mapping
+ * @nic_dev: nic device
+ * @num_cos: number of cos entries to map
+ *
+ * Uses the mapping burned into the chip when one exists, otherwise a
+ * descending default (priority max_cos-1..0 onto cos 0..max_cos-1).
+ *
+ * Return: 0 on success, -EFAULT on capability/query failure, or the
+ * error from hinic_set_up_cos_map().
+ */
+static int hinic_init_up_cos_map(struct hinic_nic_dev *nic_dev, u8 num_cos)
+{
+	u8 chip_cos_up[HINIC_DCB_COS_MAX] = {0};
+	bool map_set = false;
+	u8 hw_max_cos, idx, prio;
+	int err;
+
+	hw_max_cos = hinic_max_num_cos(nic_dev->hwdev);
+	if (!hw_max_cos || nic_dev->default_cos_id > hw_max_cos - 1) {
+		hinic_err(nic_dev, drv, "Max_cos is %d, default cos id %d\n",
+			  hw_max_cos, nic_dev->default_cos_id);
+		return -EFAULT;
+	}
+
+	err = hinic_get_chip_cos_up_map(nic_dev->pdev, &map_set, chip_cos_up);
+	if (err) {
+		hinic_err(nic_dev, drv, "Get chip cos_up map failed\n");
+		return -EFAULT;
+	}
+
+	if (!map_set) {
+		/* No mapping provisioned in the chip: default to user
+		 * priority (max_cos-1)..0 mapped onto cos 0..(max_cos-1).
+		 */
+		prio = nic_dev->max_cos - 1;
+		for (idx = 0; idx < nic_dev->max_cos; idx++)
+			chip_cos_up[idx] = prio--;
+	}
+
+	return hinic_set_up_cos_map(nic_dev, num_cos, chip_cos_up);
+}
+
+/* hinic_dcb_init - one-time DCB initialization for a PF
+ * @nic_dev: nic device being probed
+ *
+ * Derives the supported cos count from the hardware cos bitmap (rounded
+ * down to a power of two), picks the highest valid cos as the default,
+ * installs the up->cos mapping, seeds the default CEE configuration and
+ * the cached IEEE snapshot, and initializes the dcb semaphore.
+ * VFs do not own DCB configuration and return 0 immediately.
+ *
+ * Return: 0 on success, -EFAULT on capability or mapping failure.
+ */
+int hinic_dcb_init(struct hinic_nic_dev *nic_dev)
+{
+	struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+	u8 num_cos, support_cos = 0, default_cos = 0;
+	u8 i, cos_valid_bitmap;
+	int err;
+
+	if (HINIC_FUNC_IS_VF(nic_dev->hwdev))
+		return 0;
+
+	cos_valid_bitmap = hinic_cos_valid_bitmap(nic_dev->hwdev);
+	if (!cos_valid_bitmap) {
+		hinic_err(nic_dev, drv, "None cos supported\n");
+		return -EFAULT;
+	}
+
+	for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
+		if (cos_valid_bitmap & BIT(i)) {
+			support_cos++;
+			default_cos = i; /* Find max cos id as default cos */
+		}
+	}
+
+	hinic_info(nic_dev, drv, "Support num cos %d, default cos %d\n",
+		   support_cos, default_cos);
+
+	/* round down to a power of two so tc count is always valid */
+	num_cos = (u8)(1U << ilog2(support_cos));
+	if (num_cos != support_cos)
+		hinic_info(nic_dev, drv, "Adjust num_cos from %d to %d\n",
+			   support_cos, num_cos);
+
+	nic_dev->dcbx_cap = 0;
+	nic_dev->max_cos = num_cos;
+	nic_dev->default_cos_id = default_cos;
+	dcb_cfg->pfc_tcs = nic_dev->max_cos;
+	dcb_cfg->pg_tcs = nic_dev->max_cos;
+	err = hinic_init_up_cos_map(nic_dev, num_cos);
+	if (err) {
+		hinic_info(nic_dev, drv, "Initialize up_cos mapping failed\n");
+		return -EFAULT;
+	}
+
+	hinic_dcb_config_init(nic_dev, dcb_cfg);
+
+	/* force a full hardware sync on the first apply */
+	nic_dev->dcb_changes = DCB_CFG_CHG_PFC | DCB_CFG_CHG_PG_TX |
+			       DCB_CFG_CHG_PG_RX | DCB_CFG_CHG_UP_COS;
+	nic_dev->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+
+	memcpy(&nic_dev->tmp_dcb_cfg, &nic_dev->dcb_cfg,
+	       sizeof(nic_dev->tmp_dcb_cfg));
+	memcpy(&nic_dev->save_dcb_cfg, &nic_dev->dcb_cfg,
+	       sizeof(nic_dev->save_dcb_cfg));
+
+	hinic_init_ieee_settings(nic_dev);
+
+	sema_init(&nic_dev->dcb_sem, 1);
+
+	return 0;
+}
+
+/* hinic_set_prio_tc_map - mirror the up->cos table into the kernel's
+ * prio->tc table.  The default cos is presented to the stack as the
+ * last traffic class.
+ */
+void hinic_set_prio_tc_map(struct hinic_nic_dev *nic_dev)
+{
+	struct net_device *ndev = nic_dev->netdev;
+	u8 up;
+
+	for (up = 0; up < HINIC_DCB_UP_MAX; up++) {
+		u8 mapped = nic_dev->up_cos[up];
+
+		if (mapped == nic_dev->default_cos_id)
+			mapped = nic_dev->max_cos - 1;
+
+		netdev_set_prio_tc_map(ndev, up, mapped);
+	}
+}
+
+/* hinic_setup_tc - enable/disable DCB by setting the number of TCs
+ * @netdev: net device to reconfigure
+ * @tc: number of traffic classes; 0 disables DCB.  Must be a power of
+ *	two and no larger than dcb_cfg.pg_tcs.
+ *
+ * Closes a running interface around the reconfiguration and reopens it
+ * afterwards.
+ *
+ * Fix: the power-of-2 validation originally ran after hinic_close(), so
+ * an invalid @tc returned -EINVAL with the interface left down.  All
+ * parameter validation now happens before the device is touched.
+ *
+ * Return: 0 on success, -EOPNOTSUPP when DCB is unsupported, -EINVAL on
+ * bad @tc, -EFAULT when close/open fails.
+ */
+int hinic_setup_tc(struct net_device *netdev, u8 tc)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	int err;
+
+	if (!FUNC_SUPPORT_DCB(nic_dev->hwdev)) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Current function don't support DCB\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (tc > nic_dev->dcb_cfg.pg_tcs) {
+		nicif_err(nic_dev, drv, netdev, "Invalid num_tc: %d, max tc: %d\n",
+			  tc, nic_dev->dcb_cfg.pg_tcs);
+		return -EINVAL;
+	}
+
+	/* 0 passes (0 & 0xff == 0); any non-power-of-2 value is rejected
+	 * here, before the interface is closed.
+	 */
+	if (tc & (tc - 1)) {
+		nicif_err(nic_dev, drv, netdev,
+			  "Invalid num_tc: %d, must be power of 2\n",
+			  tc);
+		return -EINVAL;
+	}
+
+	if (netif_running(netdev)) {
+		err = hinic_close(netdev);
+		if (err) {
+			nicif_err(nic_dev, drv, netdev, "Failed to close device\n");
+			return -EFAULT;
+		}
+	}
+
+	if (tc) {
+		netdev_set_num_tc(netdev, tc);
+		hinic_set_prio_tc_map(nic_dev);
+
+		set_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+	} else {
+		netdev_reset_tc(netdev);
+
+		clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+	}
+
+	hinic_sq_cos_mapping(netdev);
+
+	if (netif_running(netdev)) {
+		err = hinic_open(netdev);
+		if (err) {
+			nicif_err(nic_dev, drv, netdev, "Failed to open device\n");
+			return -EFAULT;
+		}
+	} else {
+		hinic_update_num_qps(netdev);
+	}
+
+	hinic_configure_dcb(netdev);
+
+	return 0;
+}
+
+/* hinic_setup_dcb_tool - tool-facing DCB enable/disable and query
+ * @netdev: net device
+ * @dcb_en: in write mode the requested state, in read mode receives the
+ *	current state
+ * @wr_flag: true = apply *dcb_en, false = read current state
+ *
+ * Caller holds nic_mutex.  The write path temporarily drops it and
+ * takes rtnl_lock because hinic_setup_tc() must run under rtnl (and
+ * re-acquiring nic_mutex afterwards preserves the caller's locking).
+ *
+ * Return: 0 on success, 1 on failure.
+ */
+u8 hinic_setup_dcb_tool(struct net_device *netdev, u8 *dcb_en, bool wr_flag)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	int err = 0;
+
+	if (wr_flag) {
+		if (nic_dev->max_qps < nic_dev->dcb_cfg.pg_tcs && *dcb_en) {
+			nicif_err(nic_dev, drv, netdev,
+				  "max_qps: %d is less than %d\n",
+				  nic_dev->max_qps, nic_dev->dcb_cfg.pg_tcs);
+			return 1;
+		}
+		if (*dcb_en)
+			set_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+		else
+			clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+		/*hinic_setup_tc need get the nic_mutex lock again */
+		mutex_unlock(&nic_dev->nic_mutex);
+		/* kill the rtnl assert warning */
+		rtnl_lock();
+		err = hinic_setup_tc(netdev,
+				     *dcb_en ? nic_dev->dcb_cfg.pg_tcs : 0);
+		rtnl_unlock();
+		mutex_lock(&nic_dev->nic_mutex);
+
+		if (!err)
+			nicif_info(nic_dev, drv, netdev, "%s DCB\n",
+				   *dcb_en ? "Enable" : "Disable");
+	} else {
+		*dcb_en = (u8)test_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+	}
+
+	return !!err;
+}
+
+/* dcbnl getstate: report 1 when DCB is currently enabled, else 0 */
+static u8 hinic_dcbnl_get_state(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	return test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) ? 1 : 0;
+}
+
+/* dcbnl setstate: enable or disable CEE DCB.
+ * Returns 0 on success (or no change), 1 on failure — dcbnl semantics.
+ */
+static u8 hinic_dcbnl_set_state(struct net_device *netdev, u8 state)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	u8 enabled = test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) ? 1 : 0;
+	int err;
+
+	/* only valid while under CEE DCBX control */
+	if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+		return 1;
+
+	if (state == enabled)
+		return 0;
+
+	if (state && nic_dev->max_qps < nic_dev->dcb_cfg.pg_tcs) {
+		nicif_err(nic_dev, drv, netdev,
+			  "max_qps: %d is less than %d\n",
+			  nic_dev->max_qps, nic_dev->dcb_cfg.pg_tcs);
+		return 1;
+	}
+
+	err = hinic_setup_tc(netdev, state ? nic_dev->dcb_cfg.pg_tcs : 0);
+	if (!err)
+		nicif_info(nic_dev, drv, netdev, "%s DCB\n",
+			   state ? "Enable" : "Disable");
+
+	return err ? 1 : 0;
+}
+
+/* dcbnl getpermhwaddr: report the permanent MAC address.
+ * NOTE(review): perm_addr is only preset to 0xff here and this code
+ * never writes the MAC into it directly; presumably
+ * hinic_get_default_mac() fills it via @netdev — confirm against its
+ * implementation.
+ */
+static void hinic_dcbnl_get_perm_hw_addr(struct net_device *netdev,
+					 u8 *perm_addr)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	int err;
+
+	memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+	err = hinic_get_default_mac(nic_dev->hwdev, netdev);
+	if (err)
+		nicif_err(nic_dev, drv, netdev, "Failed to get default mac\n");
+}
+
+/* hinic_dcbnl_set_ets_tc_tool - tool access to the prio->tc table
+ * @netdev: net device
+ * @tc: tc[i] is the TC for priority i (8 entries)
+ * @flag: true = write @tc into the staged (tmp) config, false = read
+ *	the active config back into @tc
+ *
+ * The write path clears all staged up_maps first, then sets one bit per
+ * priority and points each touched TC's pg_id at itself.
+ */
+void hinic_dcbnl_set_ets_tc_tool(struct net_device *netdev, u8 tc[], bool flag)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct hinic_tc_cfg *cfg = nic_dev->tmp_dcb_cfg.tc_cfg;
+	struct hinic_tc_cfg *tc_conf = nic_dev->dcb_cfg.tc_cfg;
+	u8 i, tc_tmp, j;
+
+	if (flag) {
+		/*need to clear first */
+		for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+			cfg[i].path[HINIC_DCB_CFG_TX].up_map = 0;
+			cfg[i].path[HINIC_DCB_CFG_RX].up_map = 0;
+		}
+		for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+			tc_tmp = tc[i];
+			cfg[tc_tmp].path[HINIC_DCB_CFG_TX].up_map |= (u8)BIT(i);
+			cfg[tc_tmp].path[HINIC_DCB_CFG_RX].up_map |= (u8)BIT(i);
+			cfg[tc_tmp].path[HINIC_DCB_CFG_TX].pg_id = (u8)tc_tmp;
+			cfg[tc_tmp].path[HINIC_DCB_CFG_RX].pg_id = (u8)tc_tmp;
+		}
+	} else {
+		/* invert the active TX up_maps back into a prio->tc table */
+		for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+			for (j = 0; j < HINIC_DCB_TC_MAX; j++) {
+				if (tc_conf[i].path[HINIC_DCB_CFG_TX].up_map &
+				    (u8)BIT(j)) {
+					tc[j] = i;
+				}
+			}
+		}
+	}
+}
+
+/* hinic_dcbnl_set_ets_pecent_tool - tool access to PG bandwidth percents
+ * ("pecent" misspelling kept: the name is part of the exported API)
+ * @netdev: net device
+ * @percent: per-PG bandwidth percentages (HINIC_DCB_COS_MAX entries)
+ * @flag: true = write @percent into the staged TX and RX tables,
+ *	false = read the active TX table back into @percent
+ */
+void hinic_dcbnl_set_ets_pecent_tool(struct net_device *netdev,
+				     u8 percent[], bool flag)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	int i;
+
+	if (flag) {
+		for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
+			nic_dev->tmp_dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i] =
+							percent[i];
+			nic_dev->tmp_dcb_cfg.bw_pct[HINIC_DCB_CFG_RX][i] =
+							percent[i];
+		}
+	} else {
+		for (i = 0; i < HINIC_DCB_COS_MAX; i++)
+			percent[i] =
+				nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i];
+	}
+}
+
+/* dcbnl setpgtccfgtx: stage one TC's TX PG attributes (path[0] = TX).
+ * DCB_ATTR_VALUE_UNDEFINED fields are left untouched; up_map is always
+ * written (0xFF is valid when all priorities map to one TC).
+ */
+static void hinic_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+					 u8 prio, u8 pg_id, u8 bw_pct,
+					 u8 up_map)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (tc > HINIC_DCB_TC_MAX - 1)
+		return;
+
+	if (prio != DCB_ATTR_VALUE_UNDEFINED)
+		nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].prio_type = prio;
+	if (pg_id != DCB_ATTR_VALUE_UNDEFINED)
+		nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].pg_id = pg_id;
+	if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
+		nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].bw_pct = bw_pct;
+	/* if all priority mapping to the same tc,
+	 * up_map is 0xFF, and it's a valid value
+	 */
+	nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].up_map = up_map;
+}
+
+/* dcbnl setpgbwgcfgtx: stage the TX bandwidth percent of one PG */
+static void hinic_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
+					  u8 bw_pct)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (bwg_id >= HINIC_DCB_PG_MAX)
+		return;
+
+	nic_dev->tmp_dcb_cfg.bw_pct[0][bwg_id] = bw_pct;
+}
+
+/* dcbnl setpgtccfgrx: stage one TC's RX PG attributes (path[1] = RX).
+ * Same skip-on-UNDEFINED semantics as the TX variant; up_map always set.
+ */
+static void hinic_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
+					 u8 prio, u8 pg_id, u8 bw_pct,
+					 u8 up_map)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (tc > HINIC_DCB_TC_MAX - 1)
+		return;
+
+	if (prio != DCB_ATTR_VALUE_UNDEFINED)
+		nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].prio_type = prio;
+	if (pg_id != DCB_ATTR_VALUE_UNDEFINED)
+		nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].pg_id = pg_id;
+	if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
+		nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].bw_pct = bw_pct;
+
+	nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].up_map = up_map;
+}
+
+/* dcbnl setpgbwgcfgrx: stage the RX bandwidth percent of one PG */
+static void hinic_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
+					  u8 bw_pct)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (bwg_id >= HINIC_DCB_PG_MAX)
+		return;
+
+	nic_dev->tmp_dcb_cfg.bw_pct[1][bwg_id] = bw_pct;
+}
+
+/* dcbnl getpgtccfgtx: read one TC's active TX PG attributes.
+ * Out-of-range @tc leaves the output parameters untouched.
+ */
+static void hinic_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+					 u8 *prio, u8 *pg_id, u8 *bw_pct,
+					 u8 *up_map)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (tc > HINIC_DCB_TC_MAX - 1)
+		return;
+
+	*prio = nic_dev->dcb_cfg.tc_cfg[tc].path[0].prio_type;
+	*pg_id = nic_dev->dcb_cfg.tc_cfg[tc].path[0].pg_id;
+	*bw_pct = nic_dev->dcb_cfg.tc_cfg[tc].path[0].bw_pct;
+	*up_map = nic_dev->dcb_cfg.tc_cfg[tc].path[0].up_map;
+}
+
+/* dcbnl getpgbwgcfgtx: read the active TX bandwidth percent of one PG */
+static void hinic_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
+					  u8 *bw_pct)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (bwg_id >= HINIC_DCB_PG_MAX)
+		return;
+
+	*bw_pct = nic_dev->dcb_cfg.bw_pct[0][bwg_id];
+}
+
+/* dcbnl getpgtccfgrx: read one TC's active RX PG attributes.
+ * Out-of-range @tc leaves the output parameters untouched.
+ */
+static void hinic_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
+					 u8 *prio, u8 *pg_id, u8 *bw_pct,
+					 u8 *up_map)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (tc > HINIC_DCB_TC_MAX - 1)
+		return;
+
+	*prio = nic_dev->dcb_cfg.tc_cfg[tc].path[1].prio_type;
+	*pg_id = nic_dev->dcb_cfg.tc_cfg[tc].path[1].pg_id;
+	*bw_pct = nic_dev->dcb_cfg.tc_cfg[tc].path[1].bw_pct;
+	*up_map = nic_dev->dcb_cfg.tc_cfg[tc].path[1].up_map;
+}
+
+/* dcbnl getpgbwgcfgrx: read the active RX bandwidth percent of one PG */
+static void hinic_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
+					  u8 *bw_pct)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (bwg_id >= HINIC_DCB_PG_MAX)
+		return;
+
+	*bw_pct = nic_dev->dcb_cfg.bw_pct[1][bwg_id];
+}
+
+/* hinic_dcbnl_set_pfc_cfg_tool - stage PFC enables from a bitmap
+ * @netdev: net device
+ * @setting: bit i enables PFC on TC i
+ *
+ * Any bit that differs from the active config marks the staged
+ * pfc_state true so the change is pushed on the next apply.
+ */
+void hinic_dcbnl_set_pfc_cfg_tool(struct net_device *netdev, u8 setting)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	u8 i;
+
+	for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+		nic_dev->tmp_dcb_cfg.tc_cfg[i].pfc_en = !!(setting & BIT(i));
+		if (nic_dev->tmp_dcb_cfg.tc_cfg[i].pfc_en !=
+		    nic_dev->dcb_cfg.tc_cfg[i].pfc_en) {
+			nic_dev->tmp_dcb_cfg.pfc_state = true;
+		}
+	}
+}
+
+/* hinic_dcbnl_set_ets_strict_tool - tool access to per-cos strict flags
+ * @netdev: net device
+ * @setting: bitmap, bit i = strict priority on cos i
+ * @flag: true = write bitmap into the staged config, false = read the
+ *	active config's flags back into *setting (OR-accumulated)
+ *
+ * prio_type 2 marks strict priority; __set_hw_ets treats any nonzero
+ * prio_type as SP.
+ */
+void hinic_dcbnl_set_ets_strict_tool(struct net_device *netdev,
+				     u8 *setting, bool flag)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct hinic_tc_cfg *cfg = nic_dev->tmp_dcb_cfg.tc_cfg;
+	struct hinic_tc_cfg *conf = nic_dev->dcb_cfg.tc_cfg;
+	u8 i;
+
+	if (flag) {
+		for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
+			cfg[i].path[HINIC_DCB_CFG_TX].prio_type =
+				!!(*setting & BIT(i)) ? 2 : 0;
+			cfg[i].path[HINIC_DCB_CFG_RX].prio_type =
+				!!(*setting & BIT(i)) ? 2 : 0;
+		}
+	} else {
+		for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
+			*setting = *setting |
+				(u8)((u32)(!!(conf[i].path[0].prio_type)) << i);
+		}
+	}
+}
+
+/* hinic_dcbnl_set_pfc_en_tool - tool access to the staged PFC state
+ * flag==true writes *value into the staged pfc_state; flag==false reads
+ * the staged pfc_state back into *value.
+ */
+void hinic_dcbnl_set_pfc_en_tool(struct net_device *netdev,
+				 u8 *value, bool flag)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (!flag) {
+		*value = nic_dev->tmp_dcb_cfg.pfc_state;
+		return;
+	}
+
+	nic_dev->tmp_dcb_cfg.pfc_state = *value ? true : false;
+}
+
+/* hinic_dcbnl_set_ets_en_tool - tool access to the ETS enable flag
+ * flag==true applies *value to the HINIC_ETS_ENABLE bit; flag==false
+ * reads the bit back into *value.
+ */
+void hinic_dcbnl_set_ets_en_tool(struct net_device *netdev,
+				 u8 *value, bool flag)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (!flag) {
+		*value = (u8)test_bit(HINIC_ETS_ENABLE, &nic_dev->flags);
+		return;
+	}
+
+	if (*value)
+		set_bit(HINIC_ETS_ENABLE, &nic_dev->flags);
+	else
+		clear_bit(HINIC_ETS_ENABLE, &nic_dev->flags);
+}
+
+/* dcbnl setpfccfg: stage the PFC enable of one priority.
+ *
+ * Fix: @prio comes straight from the dcbnl netlink caller and was used
+ * to index tc_cfg[] without validation; the matching get path already
+ * bounds-checks it.  Reject out-of-range priorities to avoid writing
+ * past the array.
+ */
+static void hinic_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+				    u8 setting)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (prio > HINIC_DCB_TC_MAX - 1)
+		return;
+
+	nic_dev->tmp_dcb_cfg.tc_cfg[prio].pfc_en = !!setting;
+	if (nic_dev->tmp_dcb_cfg.tc_cfg[prio].pfc_en !=
+	    nic_dev->dcb_cfg.tc_cfg[prio].pfc_en)
+		nic_dev->tmp_dcb_cfg.pfc_state = true;
+}
+
+/* hinic_dcbnl_get_pfc_cfg_tool - read the active PFC enables as a bitmap
+ * @netdev: net device
+ * @setting: OR-accumulated output; bit i set when PFC is on for TC i
+ * (caller is expected to pre-clear *setting)
+ */
+void hinic_dcbnl_get_pfc_cfg_tool(struct net_device *netdev, u8 *setting)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	u8 i;
+
+	for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+		*setting = *setting |
+			(u8)((u32)(nic_dev->dcb_cfg.tc_cfg[i].pfc_en) << i);
+	}
+}
+
+/* hinic_dcbnl_get_tc_num_tool - report the number of traffic classes
+ * (equal to the number of enabled cos).
+ */
+void hinic_dcbnl_get_tc_num_tool(struct net_device *netdev, u8 *tc_num)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	*tc_num = nic_dev->max_cos;
+}
+
+/* dcbnl getpfccfg: read the active PFC enable of one priority.
+ * Out-of-range @prio leaves *setting untouched.
+ */
+static void hinic_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+				    u8 *setting)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (prio > HINIC_DCB_TC_MAX - 1)
+		return;
+
+	*setting = nic_dev->dcb_cfg.tc_cfg[prio].pfc_en;
+}
+
+/* dcbnl getcap: report CEE capability attributes.
+ * Unknown attribute ids (and UP2TC/BCN) read back as false; the TC
+ * count attributes report the 0x80 ("8 TCs") encoding.  Always
+ * returns 0 per dcbnl convention.
+ */
+static u8 hinic_dcbnl_getcap(struct net_device *netdev, int cap_id,
+			     u8 *dcb_cap)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	switch (cap_id) {
+	case DCB_CAP_ATTR_PG:
+	case DCB_CAP_ATTR_PFC:
+	case DCB_CAP_ATTR_GSP:
+		*dcb_cap = true;
+		break;
+	case DCB_CAP_ATTR_PG_TCS:
+	case DCB_CAP_ATTR_PFC_TCS:
+		*dcb_cap = 0x80;
+		break;
+	case DCB_CAP_ATTR_DCBX:
+		*dcb_cap = nic_dev->dcbx_cap;
+		break;
+	case DCB_CAP_ATTR_UP2TC:
+	case DCB_CAP_ATTR_BCN:
+	default:
+		*dcb_cap = false;
+		break;
+	}
+
+	return 0;
+}
+
+/* hinic_sync_tc_cfg - copy one direction of a staged TC config into the
+ * active config and report what changed.
+ * @tc_dst: active TC config (updated in place)
+ * @tc_src: staged TC config
+ * @dir: HINIC_DCB_CFG_TX or HINIC_DCB_CFG_RX
+ *
+ * Return: DCB_CFG_CHG_* bitmask; an up_map change additionally sets
+ * DCB_CFG_CHG_PFC because the priority mapping affects PFC.
+ */
+static u8 hinic_sync_tc_cfg(struct hinic_tc_cfg *tc_dst,
+			    struct hinic_tc_cfg *tc_src, int dir)
+{
+	u8 tc_dir_change = (dir == HINIC_DCB_CFG_TX) ?
+		DCB_CFG_CHG_PG_TX : DCB_CFG_CHG_PG_RX;
+	u8 changes = 0;
+
+	if (tc_dst->path[dir].prio_type != tc_src->path[dir].prio_type) {
+		tc_dst->path[dir].prio_type = tc_src->path[dir].prio_type;
+		changes |= tc_dir_change;
+	}
+
+	if (tc_dst->path[dir].pg_id != tc_src->path[dir].pg_id) {
+		tc_dst->path[dir].pg_id = tc_src->path[dir].pg_id;
+		changes |= tc_dir_change;
+	}
+
+	if (tc_dst->path[dir].bw_pct != tc_src->path[dir].bw_pct) {
+		tc_dst->path[dir].bw_pct = tc_src->path[dir].bw_pct;
+		changes |= tc_dir_change;
+	}
+
+	if (tc_dst->path[dir].up_map != tc_src->path[dir].up_map) {
+		tc_dst->path[dir].up_map = tc_src->path[dir].up_map;
+		changes |= (tc_dir_change | DCB_CFG_CHG_PFC);
+	}
+
+	return changes;
+}
+
+/* hinic_sync_dcb_cfg - fold the whole staged (tmp) DCB config into the
+ * active config.
+ * @nic_dev: nic device
+ *
+ * Compares every TC path, PG bandwidth entry and PFC enable, copying
+ * staged values over and accumulating DCB_CFG_CHG_* bits for whatever
+ * differed.  TC loops iterate HINIC_DCB_UP_MAX entries — this assumes
+ * the tc_cfg array has at least that many slots (TODO confirm sizes
+ * of HINIC_DCB_UP_MAX vs HINIC_DCB_TC_MAX agree).
+ *
+ * Return: DCB_CFG_CHG_* bitmask of everything that changed.
+ */
+static u8 hinic_sync_dcb_cfg(struct hinic_nic_dev *nic_dev)
+{
+	struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+	struct hinic_dcb_config *tmp_dcb_cfg = &nic_dev->tmp_dcb_cfg;
+	struct hinic_tc_cfg *tc_dst, *tc_src;
+	u8 changes = 0;
+	int i;
+
+	for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+		tc_src = &tmp_dcb_cfg->tc_cfg[i];
+		tc_dst = &dcb_cfg->tc_cfg[i];
+
+		changes |= hinic_sync_tc_cfg(tc_dst, tc_src, HINIC_DCB_CFG_TX);
+		changes |= hinic_sync_tc_cfg(tc_dst, tc_src, HINIC_DCB_CFG_RX);
+	}
+
+	for (i = 0; i < HINIC_DCB_PG_MAX; i++) {
+		if (dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i] !=
+		    tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i]) {
+			dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i] =
+				tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i];
+			changes |= DCB_CFG_CHG_PG_TX;
+		}
+
+		if (dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i] !=
+		    tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i]) {
+			dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i] =
+				tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i];
+			changes |= DCB_CFG_CHG_PG_RX;
+		}
+	}
+
+	for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+		if (dcb_cfg->tc_cfg[i].pfc_en !=
+		    tmp_dcb_cfg->tc_cfg[i].pfc_en) {
+			dcb_cfg->tc_cfg[i].pfc_en =
+				tmp_dcb_cfg->tc_cfg[i].pfc_en;
+			changes |= DCB_CFG_CHG_PFC;
+		}
+	}
+
+	if (dcb_cfg->pfc_state != tmp_dcb_cfg->pfc_state) {
+		dcb_cfg->pfc_state = tmp_dcb_cfg->pfc_state;
+		changes |= DCB_CFG_CHG_PFC;
+	}
+
+	return changes;
+}
+
+/* hinic_dcb_get_pfc_map - build the PFC enable bitmap for the hardware
+ * @nic_dev: nic device (supplies up_valid_bitmap)
+ * @dcb_cfg: active DCB configuration
+ * @pfc_map: output bitmap, one bit per user priority
+ *
+ * Fix: the original first looped over pfc_tcs OR-ing bits into
+ * *pfc_map, then unconditionally overwrote *pfc_map below — that loop
+ * was dead code and has been removed; the resulting value is unchanged.
+ *
+ * Bits for priorities outside up_valid_bitmap are dropped with a log
+ * message rather than sent to hardware.
+ */
+static void hinic_dcb_get_pfc_map(struct hinic_nic_dev *nic_dev,
+				  struct hinic_dcb_config *dcb_cfg, u8 *pfc_map)
+{
+	u8 i, up;
+	u8 pfc_en = 0, outof_range_pfc = 0;
+
+	for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+		up = (HINIC_DCB_UP_MAX - 1) - i;
+		if (dcb_cfg->tc_cfg[up].pfc_en)
+			pfc_en |= (u8)BIT(up);
+	}
+
+	*pfc_map = pfc_en & nic_dev->up_valid_bitmap;
+	outof_range_pfc = pfc_en & (~nic_dev->up_valid_bitmap);
+
+	if (outof_range_pfc)
+		hinic_info(nic_dev, drv,
+			   "PFC setting out of range, 0x%x will be ignored\n",
+			   outof_range_pfc);
+}
+
+/* is_cos_in_use - true when at least one valid user priority maps to
+ * @cos in the @up_cos table.
+ */
+static bool is_cos_in_use(u8 cos, u8 up_valid_bitmap, u8 *up_cos)
+{
+	u32 up;
+
+	for (up = 0; up < HINIC_DCB_UP_MAX; up++) {
+		if ((up_valid_bitmap & BIT(up)) && up_cos[up] == cos)
+			return true;
+	}
+
+	return false;
+}
+
+/* hinic_dcb_adjust_up_bw - normalize per-PG bandwidth shares to 100%
+ * @nic_dev: nic device (supplies the valid up->cos mapping)
+ * @up_pgid: per-cos priority group id
+ * @up_bw: per-cos bandwidth percent, rescaled in place
+ *
+ * For every PG whose member cos percentages sum to more than 100, each
+ * member is rescaled proportionally; the integer remainder of 100 is
+ * distributed one point at a time to the first members visited so the
+ * group total is exactly 100.  Groups summing to <= 100 are untouched.
+ */
+static void hinic_dcb_adjust_up_bw(struct hinic_nic_dev *nic_dev, u8 *up_pgid,
+				   u8 *up_bw)
+{
+	u8 tmp_cos, pg_id;
+	u16 bw_all;
+	u8 bw_remain, cos_cnt;
+
+	for (pg_id = 0; pg_id < HINIC_DCB_PG_MAX; pg_id++) {
+		bw_all = 0;
+		cos_cnt = 0;
+		/* Find all up mapping to the same pg */
+		for (tmp_cos = 0; tmp_cos < HINIC_DCB_UP_MAX; tmp_cos++) {
+			if (!is_cos_in_use(tmp_cos, nic_dev->up_valid_bitmap,
+					   nic_dev->up_cos))
+				continue;
+
+			if (up_pgid[tmp_cos] == pg_id) {
+				bw_all += up_bw[tmp_cos];
+				cos_cnt++;
+			}
+		}
+
+		if (bw_all <= 100 || !cos_cnt)
+			continue;
+
+		/* Calculate up percent of bandwidth group, The sum of
+		 * percentages for priorities in the same priority group
+		 * must be 100
+		 */
+		bw_remain = 100 % cos_cnt;
+		for (tmp_cos = 0; tmp_cos < HINIC_DCB_UP_MAX; tmp_cos++) {
+			if (!is_cos_in_use(tmp_cos, nic_dev->up_valid_bitmap,
+					   nic_dev->up_cos))
+				continue;
+
+			if (up_pgid[tmp_cos] == pg_id) {
+				up_bw[tmp_cos] =
+					(u8)(100 * up_bw[tmp_cos] / bw_all +
+					     (u8)!!bw_remain);
+				if (bw_remain)
+					bw_remain--;
+			}
+		}
+	}
+}
+
+/* hinic_dcb_dump_configuration - log the per-up and per-PG ETS settings
+ * about to be programmed.  @bw_pct is currently unused (the caller in
+ * __set_hw_ets passes pg_bw for both pg_bw and bw_pct).
+ */
+static void hinic_dcb_dump_configuration(struct hinic_nic_dev *nic_dev,
+					 u8 *up_tc, u8 *up_pgid, u8 *up_bw,
+					 u8 *pg_bw, u8 *up_strict, u8 *bw_pct)
+{
+	u8 i;
+	u8 cos;
+
+	for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+		if (!(nic_dev->up_valid_bitmap & BIT(i)))
+			continue;
+
+		cos = nic_dev->up_cos[i];
+		hinic_info(nic_dev, drv,
+			   "up: %d, cos: %d, tc: %d, pgid: %d, bw: %d, tsa: %d\n",
+			   i, cos, up_tc[cos], up_pgid[cos], up_bw[cos],
+			   up_strict[cos]);
+	}
+
+	for (i = 0; i < HINIC_DCB_PG_MAX; i++)
+		hinic_info(nic_dev, drv, "pgid: %d, bw: %d\n", i, pg_bw[i]);
+}
+
+/* Ucode thread timeout is 210ms, must be lagger then 210ms */
+#define HINIC_WAIT_PORT_IO_STOP 250
+
+/* hinic_stop_port_traffic_flow - quiesce the port before reprogramming
+ * @nic_dev: nic device
+ *
+ * Reference-counted under dcb_sem: only the first caller actually
+ * disables the port and all of its functions; nested callers just bump
+ * disable_port_cnt.  On failure the count is rolled back and the port
+ * state restored (set_port_err deliberately falls through to out).
+ *
+ * Return: 0 on success, negative error from the disable calls.
+ */
+static int hinic_stop_port_traffic_flow(struct hinic_nic_dev *nic_dev)
+{
+	int err = 0;
+
+	down(&nic_dev->dcb_sem);
+
+	if (nic_dev->disable_port_cnt++ != 0)
+		goto out;
+
+	err = hinic_force_port_disable(nic_dev);
+	if (err) {
+		hinic_err(nic_dev, drv, "Failed to disable port\n");
+		goto set_port_err;
+	}
+
+	err = hinic_set_port_funcs_state(nic_dev->hwdev, false);
+	if (err) {
+		hinic_err(nic_dev, drv,
+			  "Failed to disable all functions in port\n");
+		goto set_port_funcs_err;
+	}
+
+	hinic_info(nic_dev, drv, "Stop port traffic flow\n");
+
+	goto out;
+
+set_port_funcs_err:
+	hinic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev));
+
+set_port_err:
+out:
+	if (err)
+		nic_dev->disable_port_cnt--;
+
+	up(&nic_dev->dcb_sem);
+
+	return err;
+}
+
+/* hinic_start_port_traffic_flow - undo hinic_stop_port_traffic_flow
+ * @nic_dev: nic device
+ *
+ * Reference-counted under dcb_sem: only the last caller restores the
+ * port state and re-enables the port's functions.
+ *
+ * Fix: the error messages were copy-pasted from the disable path and
+ * claimed "Failed to disable ..." while this path enables — corrected
+ * to describe the actual operations.
+ *
+ * Return: 0 on success, the last error from the restore calls.
+ */
+static int hinic_start_port_traffic_flow(struct hinic_nic_dev *nic_dev)
+{
+	int err;
+
+	down(&nic_dev->dcb_sem);
+
+	nic_dev->disable_port_cnt--;
+	if (nic_dev->disable_port_cnt > 0) {
+		up(&nic_dev->dcb_sem);
+		return 0;
+	}
+
+	nic_dev->disable_port_cnt = 0;
+	up(&nic_dev->dcb_sem);
+
+	err = hinic_force_set_port_state(nic_dev,
+					 !!netif_running(nic_dev->netdev));
+	if (err)
+		hinic_err(nic_dev, drv, "Failed to restore port state\n");
+
+	err = hinic_set_port_funcs_state(nic_dev->hwdev, true);
+	if (err)
+		hinic_err(nic_dev, drv,
+			  "Failed to enable all functions in port\n");
+
+	hinic_info(nic_dev, drv, "Start port traffic flow\n");
+
+	return err;
+}
+
+/* __set_hw_cos_up_map - push the cos->up mapping to hardware
+ * Inverts the driver's up->cos table into the cos->up form the
+ * firmware expects, then programs it.
+ * Return: 0 on success, error from hinic_dcb_set_cos_up_map().
+ */
+static int __set_hw_cos_up_map(struct hinic_nic_dev *nic_dev)
+{
+	u8 cos_up_map[HINIC_DCB_COS_MAX] = {0};
+	u8 valid_cos = 0;
+	u8 up;
+	int err;
+
+	for (up = 0; up < HINIC_DCB_UP_MAX; up++) {
+		u8 cos;
+
+		if (!(nic_dev->up_valid_bitmap & BIT(up)))
+			continue;
+
+		cos = nic_dev->up_cos[up];
+		cos_up_map[cos] = up;
+		valid_cos |= (u8)BIT(cos);
+	}
+
+	err = hinic_dcb_set_cos_up_map(nic_dev->hwdev, valid_cos, cos_up_map);
+	if (err)
+		hinic_info(nic_dev, drv, "Set cos_up map failed\n");
+
+	return err;
+}
+
+/* __set_hw_ets - program the ETS (scheduling/bandwidth) tables
+ * @nic_dev: nic device
+ *
+ * Builds per-cos tc/pg/bandwidth/strictness tables from either the
+ * cached IEEE ETS state or the CEE dcb_cfg (selected by dcbx_cap),
+ * normalizes group bandwidth, and pushes the result to hardware.
+ *
+ * NOTE(review): in the IEEE branch up_strict is indexed with the
+ * priority @i while every other table uses @cos, and tc_tsa is indexed
+ * with @cos while prio_tc uses @i — looks inconsistent with the CEE
+ * branch; confirm intended indexing before relying on IEEE strict mode.
+ *
+ * Return: 0 on success, error from hinic_dcb_set_ets().
+ */
+static int __set_hw_ets(struct hinic_nic_dev *nic_dev)
+{
+	struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+	struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
+	struct hinic_tc_attr *tc_attr;
+	u8 up_tc[HINIC_DCB_UP_MAX] = {0};
+	u8 up_pgid[HINIC_DCB_UP_MAX] = {0};
+	u8 up_bw[HINIC_DCB_UP_MAX] = {0};
+	u8 pg_bw[HINIC_DCB_UP_MAX] = {0};
+	u8 up_strict[HINIC_DCB_UP_MAX] = {0};
+	u8 i, tc, cos;
+	int err;
+
+	for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+		if (!(nic_dev->up_valid_bitmap & BIT(i)))
+			continue;
+
+		cos = nic_dev->up_cos[i];
+		if ((nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) {
+			up_tc[cos] = my_ets->prio_tc[i];
+			up_pgid[cos] = my_ets->prio_tc[i];
+			up_bw[cos] = 100;
+			up_strict[i] =
+				(my_ets->tc_tsa[cos] == IEEE8021Q_TSA_STRICT) ?
+				HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR;
+
+		} else {
+			tc = hinic_dcb_get_tc(dcb_cfg, HINIC_DCB_CFG_TX, i);
+			tc_attr = &dcb_cfg->tc_cfg[tc].path[HINIC_DCB_CFG_TX];
+			up_tc[cos] = tc;
+			up_pgid[cos] = tc_attr->pg_id;
+			up_bw[cos] = tc_attr->bw_pct;
+			up_strict[cos] = tc_attr->prio_type ?
+				HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR;
+		}
+	}
+
+	hinic_dcb_adjust_up_bw(nic_dev, up_pgid, up_bw);
+
+	if (nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
+		for (i = 0; i < HINIC_DCB_PG_MAX; i++)
+			pg_bw[i] = my_ets->tc_tx_bw[i];
+	} else {
+		for (i = 0; i < HINIC_DCB_PG_MAX; i++)
+			pg_bw[i] = dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i];
+	}
+
+	if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
+		hinic_dcb_dump_configuration(nic_dev, up_tc, up_pgid,
+					     up_bw, pg_bw, up_strict,
+					     pg_bw);
+
+	err = hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw, up_pgid,
+				up_bw, up_strict);
+	if (err) {
+		hinic_err(nic_dev, drv, "Failed to set ets with mode: %d\n",
+			  nic_dev->dcbx_cap);
+		return err;
+	}
+
+	hinic_info(nic_dev, drv, "Set ets to hw done with mode: %d\n",
+		   nic_dev->dcbx_cap);
+
+	return 0;
+}
+
+/* hinic_dcbnl_set_ets_tool - apply staged cos-map/ETS changes to hw
+ * @netdev: net device
+ *
+ * Syncs the staged config into the active one, and — with port traffic
+ * stopped — pushes any cos_up map and ETS (PG TX/RX) changes.  PFC
+ * changes are left pending for hinic_dcbnl_set_pfc_tool().
+ *
+ * Return: DCB_HW_CFG_CHG on success (or nothing to do),
+ * DCB_HW_CFG_ERR on failure.
+ */
+u8 hinic_dcbnl_set_ets_tool(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	u8 state = DCB_HW_CFG_CHG;
+	int err;
+
+	nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev);
+	if (!nic_dev->dcb_changes)
+		return DCB_HW_CFG_CHG;
+
+	err = hinic_stop_port_traffic_flow(nic_dev);
+	if (err)
+		return DCB_HW_CFG_ERR;
+	/* wait all traffic flow stopped */
+	if (netdev->reg_state == NETREG_REGISTERED)
+		msleep(HINIC_WAIT_PORT_IO_STOP);
+
+	if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) {
+		err = __set_hw_cos_up_map(nic_dev);
+		if (err) {
+			hinic_info(nic_dev, drv,
+				   "Set cos_up map to hardware failed\n");
+			state = DCB_HW_CFG_ERR;
+			goto out;
+		}
+
+		nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS);
+	}
+
+	if (nic_dev->dcb_changes & (DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)) {
+		err = __set_hw_ets(nic_dev);
+		if (err) {
+			state = DCB_HW_CFG_ERR;
+			goto out;
+		}
+
+		nic_dev->dcb_changes &=
+			(~(DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX));
+	}
+
+out:
+	hinic_start_port_traffic_flow(nic_dev);
+
+	return state;
+}
+
+/* hinic_dcbnl_set_df_ieee_cfg - restore the IEEE defaults in hardware
+ * @netdev: net device
+ *
+ * If the cached IEEE ETS differs from the default snapshot, or IEEE PFC
+ * is enabled, stop traffic and reprogram the defaults (ETS restored,
+ * PFC switched off).  No-op outside IEEE DCBX mode or when nothing
+ * deviates from the defaults.
+ *
+ * Return: 0 on success or nothing to do, -EINVAL when either restore
+ * step failed.
+ */
+static int hinic_dcbnl_set_df_ieee_cfg(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct ieee_ets *ets_default = &nic_dev->hinic_ieee_ets_default;
+	struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc;
+	struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
+	struct ieee_pfc pfc = {0};
+	int err1 = 0;
+	int err2 = 0;
+	u8 flag = 0;
+
+	if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return 0;
+
+	/* bit 0: ETS deviates from default; bit 1: PFC currently enabled */
+	if (memcmp(my_ets, ets_default, sizeof(struct ieee_ets)))
+		flag |= (u8)BIT(0);
+
+	if (my_pfc->pfc_en)
+		flag |= (u8)BIT(1);
+	if (!flag)
+		return 0;
+
+	err1 = hinic_stop_port_traffic_flow(nic_dev);
+	if (err1)
+		return err1;
+	if (netdev->reg_state == NETREG_REGISTERED)
+		msleep(HINIC_WAIT_PORT_IO_STOP);
+
+	if (flag & BIT(0)) {
+		memcpy(my_ets, ets_default, sizeof(struct ieee_ets));
+		err1 = __set_hw_ets(nic_dev);
+	}
+	if (flag & BIT(1)) {
+		my_pfc->pfc_en = 0;
+		err2 = hinic_dcb_set_pfc(nic_dev->hwdev, false, pfc.pfc_en);
+		if (err2)
+			nicif_err(nic_dev, drv, netdev, "Failed to set pfc\n");
+	}
+
+	hinic_start_port_traffic_flow(nic_dev);
+
+	return (err1 || err2) ? -EINVAL : 0;
+}
+
+/* hinic_dcbnl_set_pfc_tool - apply staged PFC changes to hardware
+ * @netdev: net device
+ *
+ * Syncs the staged config and, if PFC changed, programs the PFC enable
+ * bitmap.  Unlike the ETS path, no traffic-flow stop is performed here.
+ *
+ * Return: DCB_HW_CFG_CHG on success (or nothing to do),
+ * DCB_HW_CFG_ERR on failure.
+ */
+u8 hinic_dcbnl_set_pfc_tool(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+	u8 state = DCB_HW_CFG_CHG;
+	int err;
+
+	nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev);
+	if (!nic_dev->dcb_changes)
+		return DCB_HW_CFG_CHG;
+
+	if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) {
+		u8 pfc_map = 0;
+
+		hinic_dcb_get_pfc_map(nic_dev, dcb_cfg, &pfc_map);
+		err = hinic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state,
+					pfc_map);
+		if (err) {
+			hinic_info(nic_dev, drv, "Failed to %s PFC\n",
+				   dcb_cfg->pfc_state ? "enable" : "disable");
+			state = DCB_HW_CFG_ERR;
+			goto out;
+		}
+
+		if (dcb_cfg->pfc_state)
+			hinic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n",
+				   pfc_map);
+		else
+			hinic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n");
+
+		nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC);
+	}
+out:
+
+	return state;
+}
+
+/* hinic_dcbnl_set_all - dcbnl setall: push every staged CEE change
+ * @netdev: net device
+ *
+ * Syncs the staged config into the active one and, with port traffic
+ * stopped, applies cos_up map, ETS and PFC changes in that order.
+ * Only valid in CEE DCBX mode.
+ *
+ * Return: DCB_HW_CFG_CHG when applied, DCB_HW_CFG_NO_CHG when nothing
+ * was pending, DCB_HW_CFG_ERR on failure.
+ */
+u8 hinic_dcbnl_set_all(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+	u8 state = DCB_HW_CFG_CHG;
+	int err;
+
+	if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+		return DCB_HW_CFG_ERR;
+
+	nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev);
+	if (!nic_dev->dcb_changes)
+		return DCB_HW_CFG_NO_CHG;
+
+	err = hinic_stop_port_traffic_flow(nic_dev);
+	if (err)
+		return DCB_HW_CFG_ERR;
+	/* wait all traffic flow stopped */
+	if (netdev->reg_state == NETREG_REGISTERED)
+		msleep(HINIC_WAIT_PORT_IO_STOP);
+
+	if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) {
+		err = __set_hw_cos_up_map(nic_dev);
+		if (err) {
+			hinic_info(nic_dev, drv,
+				   "Set cos_up map to hardware failed\n");
+			state = DCB_HW_CFG_ERR;
+			goto out;
+		}
+
+		nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS);
+	}
+
+	if (nic_dev->dcb_changes & (DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)) {
+		err = __set_hw_ets(nic_dev);
+		if (err) {
+			state = DCB_HW_CFG_ERR;
+			goto out;
+		}
+
+		nic_dev->dcb_changes &=
+			(~(DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX));
+	}
+
+	if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) {
+		u8 pfc_map = 0;
+
+		hinic_dcb_get_pfc_map(nic_dev, dcb_cfg, &pfc_map);
+		err = hinic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state,
+					pfc_map);
+		if (err) {
+			hinic_info(nic_dev, drv, "Failed to %s PFC\n",
+				   dcb_cfg->pfc_state ? "enable" : "disable");
+			state = DCB_HW_CFG_ERR;
+			goto out;
+		}
+
+		if (dcb_cfg->pfc_state)
+			hinic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n",
+				   pfc_map);
+		else
+			hinic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n");
+
+		nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC);
+	}
+
+out:
+	hinic_start_port_traffic_flow(nic_dev);
+
+	return state;
+}
+
+static int hinic_dcbnl_ieee_get_ets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
+
+ ets->ets_cap = my_ets->ets_cap;
+ memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+ memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+
+ return 0;
+}
+
+static int hinic_dcbnl_ieee_set_ets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+ struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
+ struct ieee_ets back_ets;
+ int err, i;
+ u8 max_tc = 0;
+ u16 total_bw = 0;
+
+ if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (!memcmp(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)) &&
+ !memcmp(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)) &&
+ !memcmp(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)) &&
+ !memcmp(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)))
+ return 0;
+
+ for (i = 0; i < HINIC_DCB_TC_MAX; i++)
+ total_bw += ets->tc_tx_bw[i];
+ if (!total_bw)
+ return -EINVAL;
+
+ for (i = 0; i < dcb_cfg->pg_tcs; i++) {
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
+ }
+ if (max_tc)
+ max_tc++;
+
+ if (max_tc > dcb_cfg->pg_tcs)
+ return -EINVAL;
+
+ max_tc = max_tc ? dcb_cfg->pg_tcs : 0;
+ memcpy(&back_ets, my_ets, sizeof(struct ieee_ets));
+ memcpy(my_ets->tc_tx_bw, ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(my_ets->tc_rx_bw, ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(my_ets->prio_tc, ets->prio_tc, sizeof(ets->prio_tc));
+ memcpy(my_ets->tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
+
+ if (max_tc != netdev_get_num_tc(netdev)) {
+ err = hinic_setup_tc(netdev, max_tc);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to setup tc with max_tc: %d, err: %d\n",
+ max_tc, err);
+ memcpy(my_ets, &back_ets, sizeof(struct ieee_ets));
+ return err;
+ }
+ }
+
+ err = hinic_stop_port_traffic_flow(nic_dev);
+ if (err)
+ return err;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ msleep(HINIC_WAIT_PORT_IO_STOP);
+
+ err = __set_hw_ets(nic_dev);
+
+ hinic_start_port_traffic_flow(nic_dev);
+
+ return err;
+}
+
+static int hinic_dcbnl_ieee_get_pfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc;
+
+ pfc->pfc_en = my_pfc->pfc_en;
+ pfc->pfc_cap = my_pfc->pfc_cap;
+
+ return 0;
+}
+
+static int hinic_dcbnl_ieee_set_pfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+ struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc;
+ struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets;
+ int err, i;
+ u8 pfc_map, max_tc;
+ u8 outof_range_pfc = 0;
+ bool pfc_en;
+
+ if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (my_pfc->pfc_en == pfc->pfc_en)
+ return 0;
+
+ pfc_map = pfc->pfc_en & nic_dev->up_valid_bitmap;
+ outof_range_pfc = pfc->pfc_en & (~nic_dev->up_valid_bitmap);
+ if (outof_range_pfc)
+ nicif_info(nic_dev, drv, netdev,
+ "pfc setting out of range, 0x%x will be ignored\n",
+ outof_range_pfc);
+
+ err = hinic_stop_port_traffic_flow(nic_dev);
+ if (err)
+ return err;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ msleep(HINIC_WAIT_PORT_IO_STOP);
+
+ pfc_en = pfc_map ? true : false;
+ max_tc = 0;
+ for (i = 0; i < dcb_cfg->pg_tcs; i++) {
+ if (my_ets->prio_tc[i] > max_tc)
+ max_tc = my_ets->prio_tc[i];
+ }
+ pfc_en = max_tc ? pfc_en : false;
+
+ err = hinic_dcb_set_pfc(nic_dev->hwdev, pfc_en, pfc_map);
+ if (err) {
+ hinic_info(nic_dev, drv,
+ "Failed to set pfc to hw with pfc_map: 0x%x err: %d\n",
+ pfc_map, err);
+ hinic_start_port_traffic_flow(nic_dev);
+ return err;
+ }
+
+ hinic_start_port_traffic_flow(nic_dev);
+ my_pfc->pfc_en = pfc->pfc_en;
+ hinic_info(nic_dev, drv,
+ "Set pfc successfully with pfc_map: 0x%x, pfc_en: %d\n",
+ pfc_map, pfc_en);
+
+ return 0;
+}
+
+static int hinic_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg;
+
+ if (!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
+ return -EINVAL;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = dcb_cfg->pg_tcs;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = dcb_cfg->pfc_tcs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ return -EINVAL;
+}
+
+static u8 hinic_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ return (u8)nic_dev->dcb_cfg.pfc_state;
+}
+
+static void hinic_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->tmp_dcb_cfg.pfc_state = !!state;
+}
+
+static u8 hinic_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ return nic_dev->dcbx_cap;
+}
+
+static u8 hinic_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ if (((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+ ((mode & DCB_CAP_DCBX_LLD_MANAGED) &&
+ (!(mode & DCB_CAP_DCBX_HOST)))) {
+ nicif_info(nic_dev, drv, netdev,
+ "Set dcbx failed with invalid mode: %d\n", mode);
+ return 1;
+ }
+
+ if (nic_dev->dcbx_cap == mode)
+ return 0;
+ nic_dev->dcbx_cap = mode;
+
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ u8 mask = DCB_CFG_CHG_PFC | DCB_CFG_CHG_PG_TX |
+ DCB_CFG_CHG_PG_RX;
+ nic_dev->dcb_changes |= mask;
+ hinic_dcbnl_set_all(netdev);
+ } else if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ if (netdev_get_num_tc(netdev)) {
+ err = hinic_setup_tc(netdev, 0);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to setup tc with mode: %d\n",
+ mode);
+ return 1;
+ }
+ }
+
+ hinic_dcbnl_set_df_ieee_cfg(netdev);
+ hinic_force_port_relink(nic_dev->hwdev);
+ } else {
+ err = hinic_setup_tc(netdev, 0);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to setup tc with mode: %d\n", mode);
+ return 1;
+ }
+ }
+ nicif_info(nic_dev, drv, netdev, "Change dcbx mode to 0x%x\n", mode);
+
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops hinic_dcbnl_ops = {
+ /* IEEE 802.1Qaz std */
+ .ieee_getets = hinic_dcbnl_ieee_get_ets,
+ .ieee_setets = hinic_dcbnl_ieee_set_ets,
+ .ieee_getpfc = hinic_dcbnl_ieee_get_pfc,
+ .ieee_setpfc = hinic_dcbnl_ieee_set_pfc,
+
+ /* CEE std */
+ .getstate = hinic_dcbnl_get_state,
+ .setstate = hinic_dcbnl_set_state,
+ .getpermhwaddr = hinic_dcbnl_get_perm_hw_addr,
+ .setpgtccfgtx = hinic_dcbnl_set_pg_tc_cfg_tx,
+ .setpgbwgcfgtx = hinic_dcbnl_set_pg_bwg_cfg_tx,
+ .setpgtccfgrx = hinic_dcbnl_set_pg_tc_cfg_rx,
+ .setpgbwgcfgrx = hinic_dcbnl_set_pg_bwg_cfg_rx,
+ .getpgtccfgtx = hinic_dcbnl_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = hinic_dcbnl_get_pg_bwg_cfg_tx,
+ .getpgtccfgrx = hinic_dcbnl_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = hinic_dcbnl_get_pg_bwg_cfg_rx,
+ .setpfccfg = hinic_dcbnl_set_pfc_cfg,
+ .getpfccfg = hinic_dcbnl_get_pfc_cfg,
+ .setall = hinic_dcbnl_set_all,
+ .getcap = hinic_dcbnl_getcap,
+ .getnumtcs = hinic_dcbnl_getnumtcs,
+ .setnumtcs = hinic_dcbnl_setnumtcs,
+ .getpfcstate = hinic_dcbnl_getpfcstate,
+ .setpfcstate = hinic_dcbnl_setpfcstate,
+
+ /* DCBX configuration */
+ .getdcbx = hinic_dcbnl_getdcbx,
+ .setdcbx = hinic_dcbnl_setdcbx,
+};
+
+int hinic_dcb_reset_hw_config(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ u8 state;
+
+ hinic_dcb_config_init(nic_dev, &nic_dev->tmp_dcb_cfg);
+ state = hinic_dcbnl_set_all(netdev);
+ if (state == DCB_HW_CFG_ERR)
+ return -EFAULT;
+
+ if (state == DCB_HW_CFG_CHG)
+ hinic_info(nic_dev, drv,
+ "Reset hardware DCB configuration done\n");
+
+ return 0;
+}
+
+void hinic_configure_dcb(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
+ memcpy(&nic_dev->tmp_dcb_cfg, &nic_dev->save_dcb_cfg,
+ sizeof(nic_dev->tmp_dcb_cfg));
+ hinic_dcbnl_set_all(netdev);
+ } else {
+ memcpy(&nic_dev->save_dcb_cfg, &nic_dev->tmp_dcb_cfg,
+ sizeof(nic_dev->save_dcb_cfg));
+ err = hinic_dcb_reset_hw_config(nic_dev);
+ if (err)
+ nicif_warn(nic_dev, drv, netdev,
+ "Failed to reset hw dcb configuration\n");
+ }
+}
+
+static bool __is_cos_up_map_change(struct hinic_nic_dev *nic_dev, u8 *cos_up)
+{
+ u8 cos, up;
+
+ for (cos = 0; cos < nic_dev->max_cos; cos++) {
+ up = cos_up[cos];
+ if (BIT(up) != (nic_dev->up_valid_bitmap & BIT(up)))
+ return true;
+ }
+
+ return false;
+}
+
+int __set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up)
+{
+ struct net_device *netdev;
+ u8 state;
+ int err = 0;
+
+ if (!nic_dev || !cos_up)
+ return -EINVAL;
+
+ netdev = nic_dev->netdev;
+
+ if (test_and_set_bit(HINIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) {
+ nicif_err(nic_dev, drv, netdev,
+ "Cos_up map setting in inprocess, please try again later\n");
+ return -EFAULT;
+ }
+
+ nicif_info(nic_dev, drv, netdev, "Set cos2up: %d%d%d%d%d%d%d%d\n",
+ cos_up[0], cos_up[1], cos_up[2], cos_up[3],
+ cos_up[4], cos_up[5], cos_up[6], cos_up[7]);
+
+ if (!__is_cos_up_map_change(nic_dev, cos_up)) {
+ nicif_err(nic_dev, drv, netdev,
+ "Same mapping, don't need to change anything\n");
+ err = 0;
+ goto out;
+ }
+
+ err = hinic_set_up_cos_map(nic_dev, nic_dev->max_cos, cos_up);
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ nic_dev->dcb_changes = DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX |
+ DCB_CFG_CHG_PFC | DCB_CFG_CHG_UP_COS;
+
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
+ /* Change map in kernel */
+ hinic_set_prio_tc_map(nic_dev);
+
+ state = hinic_dcbnl_set_all(netdev);
+ if (state == DCB_HW_CFG_ERR) {
+ nicif_err(nic_dev, drv, netdev,
+ "Reconfig dcb to hw failed\n");
+ err = -EFAULT;
+ }
+ }
+
+out:
+ clear_bit(HINIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags);
+
+ return err;
+}
+
+int hinic_get_num_cos(struct hinic_nic_dev *nic_dev, u8 *num_cos)
+{
+ if (!nic_dev || !num_cos)
+ return -EINVAL;
+
+ *num_cos = nic_dev->max_cos;
+
+ return 0;
+}
+
+int hinic_get_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *num_cos,
+ u8 *cos_up)
+{
+ u8 up, cos;
+
+ if (!nic_dev || !cos_up)
+ return -EINVAL;
+
+ for (cos = 0; cos < HINIC_DCB_COS_MAX; cos++) {
+ for (up = 0; up < HINIC_DCB_UP_MAX; up++) {
+ if (!(nic_dev->up_valid_bitmap & BIT(up)))
+ continue;
+
+ if (nic_dev->up_cos[up] == cos ||
+ nic_dev->up_cos[up] == nic_dev->default_cos_id)
+ cos_up[cos] = up;
+ }
+ }
+
+ *num_cos = nic_dev->max_cos;
+
+ return 0;
+}
+
+static int __stop_port_flow(void *uld_array[], u32 num_dev)
+{
+ struct hinic_nic_dev *tmp_dev;
+ u32 i, idx;
+ int err;
+
+ for (idx = 0; idx < num_dev; idx++) {
+ tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
+ err = hinic_stop_port_traffic_flow(tmp_dev);
+ if (err) {
+ nicif_err(tmp_dev, drv, tmp_dev->netdev,
+ "Stop port traffic flow failed\n");
+ goto stop_port_err;
+ }
+ }
+
+	/* wait until all traffic flows have stopped */
+ msleep(HINIC_WAIT_PORT_IO_STOP);
+
+ return 0;
+
+stop_port_err:
+ for (i = 0; i < idx; i++) {
+ tmp_dev = (struct hinic_nic_dev *)uld_array[i];
+ hinic_start_port_traffic_flow(tmp_dev);
+ }
+
+ return err;
+}
+
+static void __start_port_flow(void *uld_array[], u32 num_dev)
+{
+ struct hinic_nic_dev *tmp_dev;
+ u32 idx;
+
+ for (idx = 0; idx < num_dev; idx++) {
+ tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
+ hinic_start_port_traffic_flow(tmp_dev);
+ }
+}
+
+/* for hinicadm tool, need to change all ports of the chip */
+int hinic_set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up)
+{
+ void *uld_array[HINIC_MAX_PF_NUM];
+ struct hinic_nic_dev *tmp_dev;
+ u8 num_cos, old_cos_up[HINIC_DCB_COS_MAX] = {0};
+ u32 i, idx, num_dev = 0;
+ int err, rollback_err;
+
+	/* Save the old map in case the update fails */
+ err = hinic_get_cos_up_map(nic_dev, &num_cos, old_cos_up);
+ if (err || !num_cos) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Get old cos_up map failed\n");
+ return -EFAULT;
+ }
+
+ if (!memcmp(cos_up, old_cos_up, sizeof(u8) * num_cos)) {
+ nicif_info(nic_dev, drv, nic_dev->netdev,
+ "Same cos2up map, don't need to change anything\n");
+ return 0;
+ }
+
+ /* Get all pf of this chip */
+ err = hinic_get_pf_uld_array(nic_dev->pdev, &num_dev, uld_array);
+ if (err) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Get all pf private handle failed\n");
+ return -EFAULT;
+ }
+
+ err = __stop_port_flow(uld_array, num_dev);
+ if (err)
+ return -EFAULT;
+
+ for (idx = 0; idx < num_dev; idx++) {
+ tmp_dev = (struct hinic_nic_dev *)uld_array[idx];
+ err = __set_cos_up_map(tmp_dev, cos_up);
+ if (err) {
+ nicif_err(tmp_dev, drv, tmp_dev->netdev,
+ "Set cos_up map to hw failed\n");
+ goto set_err;
+ }
+ }
+
+ __start_port_flow(uld_array, num_dev);
+
+ hinic_set_chip_cos_up_map(nic_dev->pdev, cos_up);
+
+ return 0;
+
+set_err:
+ /* undo all settings */
+ for (i = 0; i < idx; i++) {
+ tmp_dev = (struct hinic_nic_dev *)uld_array[i];
+ rollback_err = __set_cos_up_map(tmp_dev, old_cos_up);
+ if (rollback_err)
+ nicif_err(tmp_dev, drv, tmp_dev->netdev,
+ "Undo cos_up map to hw failed\n");
+ }
+
+ __start_port_flow(uld_array, num_dev);
+
+ return err;
+}
+
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dcb.h b/drivers/net/ethernet/huawei/hinic/hinic_dcb.h
new file mode 100644
index 000000000000..89074a73fe6a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dcb.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_DCB_H_
+#define HINIC_DCB_H_
+
+#define HINIC_DCB_CFG_TX 0
+#define HINIC_DCB_CFG_RX 1
+
+/* IEEE8021QAZ Transmission selection algorithm identifiers */
+#define IEEE8021Q_TSA_STRICT 0x0
+#define IEEE8021Q_TSA_CBSHAPER 0x1
+#define IEEE8021Q_TSA_ETS 0x2
+#define IEEE8021Q_TSA_VENDOR 0xFF
+
+enum HINIC_DCB_FLAGS {
+ HINIC_DCB_UP_COS_SETTING,
+ HINIC_DCB_TRAFFIC_STOPPED,
+};
+
+extern const struct dcbnl_rtnl_ops hinic_dcbnl_ops;
+
+u8 hinic_dcb_get_tc(struct hinic_dcb_config *dcb_cfg, int dir, u8 up);
+
+int hinic_dcb_init(struct hinic_nic_dev *nic_dev);
+
+int hinic_dcb_reset_hw_config(struct hinic_nic_dev *nic_dev);
+
+int hinic_setup_tc(struct net_device *netdev, u8 tc);
+
+void hinic_configure_dcb(struct net_device *netdev);
+
+int hinic_set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up);
+
+int hinic_get_num_cos(struct hinic_nic_dev *nic_dev, u8 *num_cos);
+
+int hinic_get_cos_up_map(struct hinic_nic_dev *nic_dev,
+ u8 *num_cos, u8 *cos_up);
+u8 hinic_setup_dcb_tool(struct net_device *netdev, u8 *dcb_en, bool wr_flag);
+void hinic_dcbnl_set_pfc_en_tool(struct net_device *netdev,
+ u8 *value, bool flag);
+void hinic_dcbnl_set_pfc_cfg_tool(struct net_device *netdev, u8 setting);
+void hinic_dcbnl_get_pfc_cfg_tool(struct net_device *netdev, u8 *setting);
+u8 hinic_dcbnl_set_pfc_tool(struct net_device *netdev);
+void hinic_dcbnl_get_tc_num_tool(struct net_device *netdev, u8 *tc_num);
+void hinic_dcbnl_set_ets_tc_tool(struct net_device *netdev, u8 tc[], bool flag);
+void hinic_dcbnl_set_ets_pecent_tool(struct net_device *netdev,
+ u8 percent[], bool flag);
+void hinic_dcbnl_set_ets_en_tool(struct net_device *netdev,
+ u8 *value, bool flag);
+void hinic_dcbnl_set_ets_strict_tool(struct net_device *netdev,
+ u8 *setting, bool flag);
+u8 hinic_dcbnl_set_ets_tool(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
deleted file mode 100644
index 061952c6c21a..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
+++ /dev/null
@@ -1,324 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/debugfs.h>
-#include <linux/device.h>
-
-#include "hinic_debugfs.h"
-
-static struct dentry *hinic_dbgfs_root;
-
-enum sq_dbg_info {
- GLB_SQ_ID,
- SQ_PI,
- SQ_CI,
- SQ_FI,
- SQ_MSIX_ENTRY,
-};
-
-static char *sq_fields[] = {"glb_sq_id", "sq_pi", "sq_ci", "sq_fi", "sq_msix_entry"};
-
-static u64 hinic_dbg_get_sq_info(struct hinic_dev *nic_dev, struct hinic_sq *sq, int idx)
-{
- struct hinic_wq *wq = sq->wq;
-
- switch (idx) {
- case GLB_SQ_ID:
- return nic_dev->hwdev->func_to_io.global_qpn + sq->qid;
- case SQ_PI:
- return atomic_read(&wq->prod_idx) & wq->mask;
- case SQ_CI:
- return atomic_read(&wq->cons_idx) & wq->mask;
- case SQ_FI:
- return be16_to_cpu(*(__be16 *)(sq->hw_ci_addr)) & wq->mask;
- case SQ_MSIX_ENTRY:
- return sq->msix_entry;
- }
-
- return 0;
-}
-
-enum rq_dbg_info {
- GLB_RQ_ID,
- RQ_HW_PI,
- RQ_SW_CI,
- RQ_SW_PI,
- RQ_MSIX_ENTRY,
-};
-
-static char *rq_fields[] = {"glb_rq_id", "rq_hw_pi", "rq_sw_ci", "rq_sw_pi", "rq_msix_entry"};
-
-static u64 hinic_dbg_get_rq_info(struct hinic_dev *nic_dev, struct hinic_rq *rq, int idx)
-{
- struct hinic_wq *wq = rq->wq;
-
- switch (idx) {
- case GLB_RQ_ID:
- return nic_dev->hwdev->func_to_io.global_qpn + rq->qid;
- case RQ_HW_PI:
- return be16_to_cpu(*(__be16 *)(rq->pi_virt_addr)) & wq->mask;
- case RQ_SW_CI:
- return atomic_read(&wq->cons_idx) & wq->mask;
- case RQ_SW_PI:
- return atomic_read(&wq->prod_idx) & wq->mask;
- case RQ_MSIX_ENTRY:
- return rq->msix_entry;
- }
-
- return 0;
-}
-
-enum func_tbl_info {
- VALID,
- RX_MODE,
- MTU,
- RQ_DEPTH,
- QUEUE_NUM,
-};
-
-static char *func_table_fields[] = {"valid", "rx_mode", "mtu", "rq_depth", "cfg_q_num"};
-
-static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
-{
- struct tag_sml_funcfg_tbl *funcfg_table_elem;
- struct hinic_cmd_lt_rd *read_data;
- u16 out_size = sizeof(*read_data);
- int ret = ~0;
- int err;
-
- read_data = kzalloc(sizeof(*read_data), GFP_KERNEL);
- if (!read_data)
- return ~0;
-
- read_data->node = TBL_ID_FUNC_CFG_SM_NODE;
- read_data->inst = TBL_ID_FUNC_CFG_SM_INST;
- read_data->entry_size = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE;
- read_data->lt_index = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
- read_data->len = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE;
-
- err = hinic_port_msg_cmd(nic_dev->hwdev, HINIC_PORT_CMD_RD_LINE_TBL, read_data,
- sizeof(*read_data), read_data, &out_size);
- if (err || out_size != sizeof(*read_data) || read_data->status) {
- netif_err(nic_dev, drv, nic_dev->netdev,
- "Failed to get func table, err: %d, status: 0x%x, out size: 0x%x\n",
- err, read_data->status, out_size);
- kfree(read_data);
- return ~0;
- }
-
- funcfg_table_elem = (struct tag_sml_funcfg_tbl *)read_data->data;
-
- switch (idx) {
- case VALID:
- ret = funcfg_table_elem->dw0.bs.valid;
- break;
- case RX_MODE:
- ret = funcfg_table_elem->dw0.bs.nic_rx_mode;
- break;
- case MTU:
- ret = funcfg_table_elem->dw1.bs.mtu;
- break;
- case RQ_DEPTH:
- ret = funcfg_table_elem->dw13.bs.cfg_rq_depth;
- break;
- case QUEUE_NUM:
- ret = funcfg_table_elem->dw13.bs.cfg_q_num;
- break;
- }
-
- kfree(read_data);
-
- return ret;
-}
-
-static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct hinic_debug_priv *dbg;
- char ret_buf[20];
- int *desc;
- u64 out;
- int ret;
-
- desc = filp->private_data;
- dbg = container_of(desc, struct hinic_debug_priv, field_id[*desc]);
-
- switch (dbg->type) {
- case HINIC_DBG_SQ_INFO:
- out = hinic_dbg_get_sq_info(dbg->dev, dbg->object, *desc);
- break;
-
- case HINIC_DBG_RQ_INFO:
- out = hinic_dbg_get_rq_info(dbg->dev, dbg->object, *desc);
- break;
-
- case HINIC_DBG_FUNC_TABLE:
- out = hinic_dbg_get_func_table(dbg->dev, *desc);
- break;
-
- default:
- netif_warn(dbg->dev, drv, dbg->dev->netdev, "Invalid hinic debug cmd: %d\n",
- dbg->type);
- return -EINVAL;
- }
-
- ret = snprintf(ret_buf, sizeof(ret_buf), "0x%llx\n", out);
-
- return simple_read_from_buffer(buffer, count, ppos, ret_buf, ret);
-}
-
-static const struct file_operations hinic_dbg_cmd_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = hinic_dbg_cmd_read,
-};
-
-static int create_dbg_files(struct hinic_dev *dev, enum hinic_dbg_type type, void *data,
- struct dentry *root, struct hinic_debug_priv **dbg, char **field,
- int nfile)
-{
- struct hinic_debug_priv *tmp;
- int i;
-
- tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- tmp->dev = dev;
- tmp->object = data;
- tmp->type = type;
- tmp->root = root;
-
- for (i = 0; i < nfile; i++) {
- tmp->field_id[i] = i;
- debugfs_create_file(field[i], 0400, root, &tmp->field_id[i], &hinic_dbg_cmd_fops);
- }
-
- *dbg = tmp;
-
- return 0;
-}
-
-static void rem_dbg_files(struct hinic_debug_priv *dbg)
-{
- if (dbg->type != HINIC_DBG_FUNC_TABLE)
- debugfs_remove_recursive(dbg->root);
-
- kfree(dbg);
-}
-
-int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id)
-{
- struct hinic_sq *sq;
- struct dentry *root;
- char sub_dir[16];
-
- sq = dev->txqs[sq_id].sq;
-
- sprintf(sub_dir, "0x%x", sq_id);
-
- root = debugfs_create_dir(sub_dir, dev->sq_dbgfs);
-
- return create_dbg_files(dev, HINIC_DBG_SQ_INFO, sq, root, &sq->dbg, sq_fields,
- ARRAY_SIZE(sq_fields));
-}
-
-void hinic_sq_debug_rem(struct hinic_sq *sq)
-{
- if (sq->dbg)
- rem_dbg_files(sq->dbg);
-}
-
-int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id)
-{
- struct hinic_rq *rq;
- struct dentry *root;
- char sub_dir[16];
-
- rq = dev->rxqs[rq_id].rq;
-
- sprintf(sub_dir, "0x%x", rq_id);
-
- root = debugfs_create_dir(sub_dir, dev->rq_dbgfs);
-
- return create_dbg_files(dev, HINIC_DBG_RQ_INFO, rq, root, &rq->dbg, rq_fields,
- ARRAY_SIZE(rq_fields));
-}
-
-void hinic_rq_debug_rem(struct hinic_rq *rq)
-{
- if (rq->dbg)
- rem_dbg_files(rq->dbg);
-}
-
-int hinic_func_table_debug_add(struct hinic_dev *dev)
-{
- if (HINIC_IS_VF(dev->hwdev->hwif))
- return 0;
-
- return create_dbg_files(dev, HINIC_DBG_FUNC_TABLE, dev, dev->func_tbl_dbgfs, &dev->dbg,
- func_table_fields, ARRAY_SIZE(func_table_fields));
-}
-
-void hinic_func_table_debug_rem(struct hinic_dev *dev)
-{
- if (!HINIC_IS_VF(dev->hwdev->hwif) && dev->dbg)
- rem_dbg_files(dev->dbg);
-}
-
-void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev)
-{
- nic_dev->sq_dbgfs = debugfs_create_dir("SQs", nic_dev->dbgfs_root);
-}
-
-void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev)
-{
- debugfs_remove_recursive(nic_dev->sq_dbgfs);
-}
-
-void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev)
-{
- nic_dev->rq_dbgfs = debugfs_create_dir("RQs", nic_dev->dbgfs_root);
-}
-
-void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev)
-{
- debugfs_remove_recursive(nic_dev->rq_dbgfs);
-}
-
-void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev)
-{
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- nic_dev->func_tbl_dbgfs = debugfs_create_dir("func_table", nic_dev->dbgfs_root);
-}
-
-void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev)
-{
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- debugfs_remove_recursive(nic_dev->func_tbl_dbgfs);
-}
-
-void hinic_dbg_init(struct hinic_dev *nic_dev)
-{
- nic_dev->dbgfs_root = debugfs_create_dir(pci_name(nic_dev->hwdev->hwif->pdev),
- hinic_dbgfs_root);
-}
-
-void hinic_dbg_uninit(struct hinic_dev *nic_dev)
-{
- debugfs_remove_recursive(nic_dev->dbgfs_root);
- nic_dev->dbgfs_root = NULL;
-}
-
-void hinic_dbg_register_debugfs(const char *debugfs_dir_name)
-{
- hinic_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
-}
-
-void hinic_dbg_unregister_debugfs(void)
-{
- debugfs_remove_recursive(hinic_dbgfs_root);
- hinic_dbgfs_root = NULL;
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
deleted file mode 100644
index e10f739d8339..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_DEBUGFS_H
-#define HINIC_DEBUGFS_H
-
-#include "hinic_dev.h"
-
-#define TBL_ID_FUNC_CFG_SM_NODE 11
-#define TBL_ID_FUNC_CFG_SM_INST 1
-
-#define HINIC_FUNCTION_CONFIGURE_TABLE_SIZE 64
-
-struct hinic_cmd_lt_rd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- unsigned char node;
- unsigned char inst;
- unsigned char entry_size;
- unsigned char rsvd;
- unsigned int lt_index;
- unsigned int offset;
- unsigned int len;
- unsigned char data[100];
-};
-
-struct tag_sml_funcfg_tbl {
- union {
- struct {
- u32 rsvd0 :8;
- u32 nic_rx_mode :5;
- u32 rsvd1 :18;
- u32 valid :1;
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
- u32 vlan_id :12;
- u32 vlan_mode :3;
- u32 fast_recycled_mode :1;
- u32 mtu :16;
- } bs;
-
- u32 value;
- } dw1;
-
- u32 dw2;
- u32 dw3;
- u32 dw4;
- u32 dw5;
- u32 dw6;
- u32 dw7;
- u32 dw8;
- u32 dw9;
- u32 dw10;
- u32 dw11;
- u32 dw12;
-
- union {
- struct {
- u32 rsvd2 :15;
- u32 cfg_q_num :9;
- u32 cfg_rq_depth :6;
- u32 vhd_type :2;
- } bs;
-
- u32 value;
- } dw13;
-
- u32 dw14;
- u32 dw15;
-};
-
-int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id);
-
-void hinic_sq_debug_rem(struct hinic_sq *sq);
-
-int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id);
-
-void hinic_rq_debug_rem(struct hinic_rq *rq);
-
-int hinic_func_table_debug_add(struct hinic_dev *dev);
-
-void hinic_func_table_debug_rem(struct hinic_dev *dev);
-
-void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev);
-
-void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev);
-
-void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev);
-
-void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev);
-
-void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev);
-
-void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev);
-
-void hinic_dbg_init(struct hinic_dev *nic_dev);
-
-void hinic_dbg_uninit(struct hinic_dev *nic_dev);
-
-void hinic_dbg_register_debugfs(const char *debugfs_dir_name);
-
-void hinic_dbg_unregister_debugfs(void);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
deleted file mode 100644
index 52ea97c818b8..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_DEV_H
-#define HINIC_DEV_H
-
-#include <linux/netdevice.h>
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-
-#include "hinic_hw_dev.h"
-#include "hinic_tx.h"
-#include "hinic_rx.h"
-#include "hinic_sriov.h"
-
-#define HINIC_DRV_NAME "hinic"
-
-#define LP_PKT_CNT 64
-
-#define HINIC_MAX_JUMBO_FRAME_SIZE 15872
-#define HINIC_MAX_MTU_SIZE (HINIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN)
-#define HINIC_MIN_MTU_SIZE 256
-
-enum hinic_flags {
- HINIC_LINK_UP = BIT(0),
- HINIC_INTF_UP = BIT(1),
- HINIC_RSS_ENABLE = BIT(2),
- HINIC_LINK_DOWN = BIT(3),
- HINIC_LP_TEST = BIT(4),
-};
-
-struct hinic_rx_mode_work {
- struct work_struct work;
- u32 rx_mode;
-};
-
-struct hinic_rss_type {
- u8 tcp_ipv6_ext;
- u8 ipv6_ext;
- u8 tcp_ipv6;
- u8 ipv6;
- u8 tcp_ipv4;
- u8 ipv4;
- u8 udp_ipv6;
- u8 udp_ipv4;
-};
-
-enum hinic_rss_hash_type {
- HINIC_RSS_HASH_ENGINE_TYPE_XOR,
- HINIC_RSS_HASH_ENGINE_TYPE_TOEP,
- HINIC_RSS_HASH_ENGINE_TYPE_MAX,
-};
-
-struct hinic_intr_coal_info {
- u8 pending_limt;
- u8 coalesce_timer_cfg;
- u8 resend_timer_cfg;
-};
-
-enum hinic_dbg_type {
- HINIC_DBG_SQ_INFO,
- HINIC_DBG_RQ_INFO,
- HINIC_DBG_FUNC_TABLE,
-};
-
-struct hinic_debug_priv {
- struct hinic_dev *dev;
- void *object;
- enum hinic_dbg_type type;
- struct dentry *root;
- int field_id[64];
-};
-
-struct hinic_dev {
- struct net_device *netdev;
- struct hinic_hwdev *hwdev;
-
- u32 msg_enable;
- unsigned int tx_weight;
- unsigned int rx_weight;
- u16 num_qps;
- u16 max_qps;
-
- unsigned int flags;
-
- struct semaphore mgmt_lock;
- unsigned long *vlan_bitmap;
-
- struct hinic_rx_mode_work rx_mode_work;
- struct workqueue_struct *workq;
-
- struct hinic_txq *txqs;
- struct hinic_rxq *rxqs;
- u16 sq_depth;
- u16 rq_depth;
-
- u8 rss_tmpl_idx;
- u8 rss_hash_engine;
- u16 num_rss;
- u16 rss_limit;
- struct hinic_rss_type rss_type;
- u8 *rss_hkey_user;
- s32 *rss_indir_user;
- struct hinic_intr_coal_info *rx_intr_coalesce;
- struct hinic_intr_coal_info *tx_intr_coalesce;
- struct hinic_sriov_info sriov_info;
- int lb_test_rx_idx;
- int lb_pkt_len;
- u8 *lb_test_rx_buf;
-
- struct dentry *dbgfs_root;
- struct dentry *sq_dbgfs;
- struct dentry *rq_dbgfs;
- struct dentry *func_tbl_dbgfs;
- struct hinic_debug_priv *dbg;
- struct devlink *devlink;
- bool cable_unplugged;
- bool module_unrecognized;
-};
-
-struct hinic_devlink_priv {
- struct hinic_hwdev *hwdev;
- struct devlink_health_reporter *hw_fault_reporter;
- struct devlink_health_reporter *fw_fault_reporter;
-};
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
deleted file mode 100644
index 1749d26f4bef..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ /dev/null
@@ -1,584 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-#include <linux/netlink.h>
-#include <net/devlink.h>
-#include <linux/firmware.h>
-
-#include "hinic_port.h"
-#include "hinic_devlink.h"
-#include "hinic_hw_dev.h"
-
-static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf,
- u32 image_size, struct host_image_st *host_image)
-{
- struct fw_image_st *fw_image = NULL;
- u32 len = 0;
- u32 i;
-
- fw_image = (struct fw_image_st *)buf;
-
- if (fw_image->fw_magic != HINIC_MAGIC_NUM) {
- dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong fw_magic read from file, fw_magic: 0x%x\n",
- fw_image->fw_magic);
- return false;
- }
-
- if (fw_image->fw_info.fw_section_cnt > MAX_FW_TYPE_NUM) {
- dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong fw_type_num read from file, fw_type_num: 0x%x\n",
- fw_image->fw_info.fw_section_cnt);
- return false;
- }
-
- for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) {
- len += fw_image->fw_section_info[i].fw_section_len;
- host_image->image_section_info[i] = fw_image->fw_section_info[i];
- }
-
- if (len != fw_image->fw_len ||
- (fw_image->fw_len + UPDATEFW_IMAGE_HEAD_SIZE) != image_size) {
- dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong data size read from file\n");
- return false;
- }
-
- host_image->image_info.up_total_len = fw_image->fw_len;
- host_image->image_info.fw_version = fw_image->fw_version;
- host_image->section_type_num = fw_image->fw_info.fw_section_cnt;
- host_image->device_id = fw_image->device_id;
-
- return true;
-}
-
-static bool check_image_integrity(struct hinic_devlink_priv *priv,
- struct host_image_st *host_image,
- u32 update_type)
-{
- u32 collect_section_type = 0;
- u32 i, type;
-
- for (i = 0; i < host_image->section_type_num; i++) {
- type = host_image->image_section_info[i].fw_section_type;
- if (collect_section_type & (1U << type)) {
- dev_err(&priv->hwdev->hwif->pdev->dev, "Duplicate section type: %u\n",
- type);
- return false;
- }
- collect_section_type |= (1U << type);
- }
-
- if (update_type == FW_UPDATE_COLD &&
- (((collect_section_type & _IMAGE_COLD_SUB_MODULES_MUST_IN) ==
- _IMAGE_COLD_SUB_MODULES_MUST_IN) ||
- collect_section_type == _IMAGE_CFG_SUB_MODULES_MUST_IN))
- return true;
-
- if (update_type == FW_UPDATE_HOT &&
- (collect_section_type & _IMAGE_HOT_SUB_MODULES_MUST_IN) ==
- _IMAGE_HOT_SUB_MODULES_MUST_IN)
- return true;
-
- if (update_type == FW_UPDATE_COLD)
- dev_err(&priv->hwdev->hwif->pdev->dev, "Check file integrity failed, valid: 0x%x or 0x%lx, current: 0x%x\n",
- _IMAGE_COLD_SUB_MODULES_MUST_IN,
- _IMAGE_CFG_SUB_MODULES_MUST_IN, collect_section_type);
- else
- dev_err(&priv->hwdev->hwif->pdev->dev, "Check file integrity failed, valid:0x%x, current: 0x%x\n",
- _IMAGE_HOT_SUB_MODULES_MUST_IN, collect_section_type);
-
- return false;
-}
-
-static int check_image_device_type(struct hinic_devlink_priv *priv,
- u32 image_device_type)
-{
- struct hinic_comm_board_info board_info = {0};
-
- if (hinic_get_board_info(priv->hwdev, &board_info)) {
- dev_err(&priv->hwdev->hwif->pdev->dev, "Get board info failed\n");
- return false;
- }
-
- if (image_device_type == board_info.info.board_type)
- return true;
-
- dev_err(&priv->hwdev->hwif->pdev->dev, "The device type of upgrade file doesn't match the device type of current firmware, please check the upgrade file\n");
- dev_err(&priv->hwdev->hwif->pdev->dev, "The image device type: 0x%x, firmware device type: 0x%x\n",
- image_device_type, board_info.info.board_type);
-
- return false;
-}
-
-static int hinic_flash_fw(struct hinic_devlink_priv *priv, const u8 *data,
- struct host_image_st *host_image)
-{
- u32 section_remain_send_len, send_fragment_len, send_pos, up_total_len;
- struct hinic_cmd_update_fw *fw_update_msg = NULL;
- u32 section_type, section_crc, section_version;
- u32 i, len, section_len, section_offset;
- u16 out_size = sizeof(*fw_update_msg);
- int total_len_flag = 0;
- int err;
-
- fw_update_msg = kzalloc(sizeof(*fw_update_msg), GFP_KERNEL);
- if (!fw_update_msg)
- return -ENOMEM;
-
- up_total_len = host_image->image_info.up_total_len;
-
- for (i = 0; i < host_image->section_type_num; i++) {
- len = host_image->image_section_info[i].fw_section_len;
- if (host_image->image_section_info[i].fw_section_type ==
- UP_FW_UPDATE_BOOT) {
- up_total_len = up_total_len - len;
- break;
- }
- }
-
- for (i = 0; i < host_image->section_type_num; i++) {
- section_len =
- host_image->image_section_info[i].fw_section_len;
- section_offset =
- host_image->image_section_info[i].fw_section_offset;
- section_remain_send_len = section_len;
- section_type =
- host_image->image_section_info[i].fw_section_type;
- section_crc = host_image->image_section_info[i].fw_section_crc;
- section_version =
- host_image->image_section_info[i].fw_section_version;
-
- if (section_type == UP_FW_UPDATE_BOOT)
- continue;
-
- send_fragment_len = 0;
- send_pos = 0;
-
- while (section_remain_send_len > 0) {
- if (!total_len_flag) {
- fw_update_msg->total_len = up_total_len;
- total_len_flag = 1;
- } else {
- fw_update_msg->total_len = 0;
- }
-
- memset(fw_update_msg->data, 0, MAX_FW_FRAGMENT_LEN);
-
- fw_update_msg->ctl_info.SF =
- (section_remain_send_len == section_len) ?
- true : false;
- fw_update_msg->section_info.FW_section_CRC = section_crc;
- fw_update_msg->fw_section_version = section_version;
- fw_update_msg->ctl_info.flag = UP_TYPE_A;
-
- if (section_type <= UP_FW_UPDATE_UP_DATA_B) {
- fw_update_msg->section_info.FW_section_type =
- (section_type % 2) ?
- UP_FW_UPDATE_UP_DATA :
- UP_FW_UPDATE_UP_TEXT;
-
- fw_update_msg->ctl_info.flag = UP_TYPE_B;
- if (section_type <= UP_FW_UPDATE_UP_DATA_A)
- fw_update_msg->ctl_info.flag = UP_TYPE_A;
- } else {
- fw_update_msg->section_info.FW_section_type =
- section_type - 0x2;
- }
-
- fw_update_msg->setion_total_len = section_len;
- fw_update_msg->section_offset = send_pos;
-
- if (section_remain_send_len <= MAX_FW_FRAGMENT_LEN) {
- fw_update_msg->ctl_info.SL = true;
- fw_update_msg->ctl_info.fragment_len =
- section_remain_send_len;
- send_fragment_len += section_remain_send_len;
- } else {
- fw_update_msg->ctl_info.SL = false;
- fw_update_msg->ctl_info.fragment_len =
- MAX_FW_FRAGMENT_LEN;
- send_fragment_len += MAX_FW_FRAGMENT_LEN;
- }
-
- memcpy(fw_update_msg->data,
- data + UPDATEFW_IMAGE_HEAD_SIZE +
- section_offset + send_pos,
- fw_update_msg->ctl_info.fragment_len);
-
- err = hinic_port_msg_cmd(priv->hwdev,
- HINIC_PORT_CMD_UPDATE_FW,
- fw_update_msg,
- sizeof(*fw_update_msg),
- fw_update_msg, &out_size);
- if (err || !out_size || fw_update_msg->status) {
- dev_err(&priv->hwdev->hwif->pdev->dev, "Failed to update firmware, err: %d, status: 0x%x, out size: 0x%x\n",
- err, fw_update_msg->status, out_size);
- err = fw_update_msg->status ?
- fw_update_msg->status : -EIO;
- kfree(fw_update_msg);
- return err;
- }
-
- send_pos = send_fragment_len;
- section_remain_send_len = section_len -
- send_fragment_len;
- }
- }
-
- kfree(fw_update_msg);
-
- return 0;
-}
-
-static int hinic_firmware_update(struct hinic_devlink_priv *priv,
- const struct firmware *fw,
- struct netlink_ext_ack *extack)
-{
- struct host_image_st host_image;
- int err;
-
- memset(&host_image, 0, sizeof(struct host_image_st));
-
- if (!check_image_valid(priv, fw->data, fw->size, &host_image) ||
- !check_image_integrity(priv, &host_image, FW_UPDATE_COLD) ||
- !check_image_device_type(priv, host_image.device_id)) {
- NL_SET_ERR_MSG_MOD(extack, "Check image failed");
- return -EINVAL;
- }
-
- dev_info(&priv->hwdev->hwif->pdev->dev, "Flash firmware begin\n");
-
- err = hinic_flash_fw(priv, fw->data, &host_image);
- if (err) {
- if (err == HINIC_FW_DISMATCH_ERROR) {
- dev_err(&priv->hwdev->hwif->pdev->dev, "Firmware image doesn't match this card, please use newer image, err: %d\n",
- err);
- NL_SET_ERR_MSG_MOD(extack,
- "Firmware image doesn't match this card, please use newer image");
- } else {
- dev_err(&priv->hwdev->hwif->pdev->dev, "Send firmware image data failed, err: %d\n",
- err);
- NL_SET_ERR_MSG_MOD(extack, "Send firmware image data failed");
- }
-
- return err;
- }
-
- dev_info(&priv->hwdev->hwif->pdev->dev, "Flash firmware end\n");
-
- return 0;
-}
-
-static int hinic_devlink_flash_update(struct devlink *devlink,
- struct devlink_flash_update_params *params,
- struct netlink_ext_ack *extack)
-{
- struct hinic_devlink_priv *priv = devlink_priv(devlink);
-
- return hinic_firmware_update(priv, params->fw, extack);
-}
-
-static const struct devlink_ops hinic_devlink_ops = {
- .flash_update = hinic_devlink_flash_update,
-};
-
-struct devlink *hinic_devlink_alloc(struct device *dev)
-{
- return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev), dev);
-}
-
-void hinic_devlink_free(struct devlink *devlink)
-{
- devlink_free(devlink);
-}
-
-void hinic_devlink_register(struct hinic_devlink_priv *priv)
-{
- struct devlink *devlink = priv_to_devlink(priv);
-
- devlink_register(devlink);
-}
-
-void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
-{
- struct devlink *devlink = priv_to_devlink(priv);
-
- devlink_unregister(devlink);
-}
-
-static int chip_fault_show(struct devlink_fmsg *fmsg,
- struct hinic_fault_event *event)
-{
- const char * const level_str[FAULT_LEVEL_MAX + 1] = {
- "fatal", "reset", "flr", "general", "suggestion", "Unknown"};
- u8 fault_level;
- int err;
-
- fault_level = (event->event.chip.err_level < FAULT_LEVEL_MAX) ?
- event->event.chip.err_level : FAULT_LEVEL_MAX;
- if (fault_level == FAULT_LEVEL_SERIOUS_FLR) {
- err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id",
- (u32)event->event.chip.func_id);
- if (err)
- return err;
- }
-
- err = devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr",
- event->event.chip.err_csr_addr);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_value",
- event->event.chip.err_csr_value);
- if (err)
- return err;
-
- return 0;
-}
-
-static int fault_report_show(struct devlink_fmsg *fmsg,
- struct hinic_fault_event *event)
-{
- const char * const type_str[FAULT_TYPE_MAX + 1] = {
- "chip", "ucode", "mem rd timeout", "mem wr timeout",
- "reg rd timeout", "reg wr timeout", "phy fault", "Unknown"};
- u8 fault_type;
- int err;
-
- fault_type = (event->type < FAULT_TYPE_MAX) ? event->type : FAULT_TYPE_MAX;
-
- err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]);
- if (err)
- return err;
-
- err = devlink_fmsg_binary_pair_put(fmsg, "Fault raw data",
- event->event.val, sizeof(event->event.val));
- if (err)
- return err;
-
- switch (event->type) {
- case FAULT_TYPE_CHIP:
- err = chip_fault_show(fmsg, event);
- if (err)
- return err;
- break;
- case FAULT_TYPE_UCODE:
- err = devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id);
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id);
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id);
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc);
- if (err)
- return err;
- break;
- case FAULT_TYPE_MEM_RD_TIMEOUT:
- case FAULT_TYPE_MEM_WR_TIMEOUT:
- err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl",
- event->event.mem_timeout.err_csr_ctrl);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_data",
- event->event.mem_timeout.err_csr_data);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab",
- event->event.mem_timeout.ctrl_tab);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "mem_index",
- event->event.mem_timeout.mem_index);
- if (err)
- return err;
- break;
- case FAULT_TYPE_REG_RD_TIMEOUT:
- case FAULT_TYPE_REG_WR_TIMEOUT:
- err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr);
- if (err)
- return err;
- break;
- case FAULT_TYPE_PHY_FAULT:
- err = devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type);
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id);
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data);
- if (err)
- return err;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx,
- struct netlink_ext_ack *extack)
-{
- if (priv_ctx)
- return fault_report_show(fmsg, priv_ctx);
-
- return 0;
-}
-
-static int mgmt_watchdog_report_show(struct devlink_fmsg *fmsg,
- struct hinic_mgmt_watchdog_info *watchdog_info)
-{
- int err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", watchdog_info->curr_time_h);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "time_l", watchdog_info->curr_time_l);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "task_id", watchdog_info->task_id);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "sp", watchdog_info->sp);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", watchdog_info->curr_used);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "peak_used", watchdog_info->peak_used);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", watchdog_info->is_overflow);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "stack_top", watchdog_info->stack_top);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", watchdog_info->stack_bottom);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", watchdog_info->pc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "lr", watchdog_info->lr);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cpsr", watchdog_info->cpsr);
- if (err)
- return err;
-
- err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info",
- watchdog_info->reg, sizeof(watchdog_info->reg));
- if (err)
- return err;
-
- err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)",
- watchdog_info->data, sizeof(watchdog_info->data));
- if (err)
- return err;
-
- return 0;
-}
-
-static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx,
- struct netlink_ext_ack *extack)
-{
- if (priv_ctx)
- return mgmt_watchdog_report_show(fmsg, priv_ctx);
-
- return 0;
-}
-
-static const struct devlink_health_reporter_ops hinic_hw_fault_reporter_ops = {
- .name = "hw",
- .dump = hinic_hw_reporter_dump,
-};
-
-static const struct devlink_health_reporter_ops hinic_fw_fault_reporter_ops = {
- .name = "fw",
- .dump = hinic_fw_reporter_dump,
-};
-
-int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
-{
- struct devlink *devlink = priv_to_devlink(priv);
-
- priv->hw_fault_reporter =
- devlink_health_reporter_create(devlink, &hinic_hw_fault_reporter_ops,
- 0, priv);
- if (IS_ERR(priv->hw_fault_reporter)) {
- dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create hw fault reporter, err: %ld\n",
- PTR_ERR(priv->hw_fault_reporter));
- return PTR_ERR(priv->hw_fault_reporter);
- }
-
- priv->fw_fault_reporter =
- devlink_health_reporter_create(devlink, &hinic_fw_fault_reporter_ops,
- 0, priv);
- if (IS_ERR(priv->fw_fault_reporter)) {
- dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create fw fault reporter, err: %ld\n",
- PTR_ERR(priv->fw_fault_reporter));
- devlink_health_reporter_destroy(priv->hw_fault_reporter);
- priv->hw_fault_reporter = NULL;
- return PTR_ERR(priv->fw_fault_reporter);
- }
-
- return 0;
-}
-
-void hinic_health_reporters_destroy(struct hinic_devlink_priv *priv)
-{
- if (!IS_ERR_OR_NULL(priv->fw_fault_reporter)) {
- devlink_health_reporter_destroy(priv->fw_fault_reporter);
- priv->fw_fault_reporter = NULL;
- }
-
- if (!IS_ERR_OR_NULL(priv->hw_fault_reporter)) {
- devlink_health_reporter_destroy(priv->hw_fault_reporter);
- priv->hw_fault_reporter = NULL;
- }
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
deleted file mode 100644
index 46760d607b9b..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef __HINIC_DEVLINK_H__
-#define __HINIC_DEVLINK_H__
-
-#include <net/devlink.h>
-#include "hinic_dev.h"
-
-#define MAX_FW_TYPE_NUM 30
-#define HINIC_MAGIC_NUM 0x18221100
-#define UPDATEFW_IMAGE_HEAD_SIZE 1024
-#define FW_UPDATE_COLD 0
-#define FW_UPDATE_HOT 1
-
-#define UP_TYPE_A 0x0
-#define UP_TYPE_B 0x1
-
-#define MAX_FW_FRAGMENT_LEN 1536
-#define HINIC_FW_DISMATCH_ERROR 10
-
-enum hinic_fw_type {
- UP_FW_UPDATE_UP_TEXT_A = 0x0,
- UP_FW_UPDATE_UP_DATA_A,
- UP_FW_UPDATE_UP_TEXT_B,
- UP_FW_UPDATE_UP_DATA_B,
- UP_FW_UPDATE_UP_DICT,
-
- UP_FW_UPDATE_HLINK_ONE = 0x5,
- UP_FW_UPDATE_HLINK_TWO,
- UP_FW_UPDATE_HLINK_THR,
- UP_FW_UPDATE_PHY,
- UP_FW_UPDATE_TILE_TEXT,
-
- UP_FW_UPDATE_TILE_DATA = 0xa,
- UP_FW_UPDATE_TILE_DICT,
- UP_FW_UPDATE_PPE_STATE,
- UP_FW_UPDATE_PPE_BRANCH,
- UP_FW_UPDATE_PPE_EXTACT,
-
- UP_FW_UPDATE_CLP_LEGACY = 0xf,
- UP_FW_UPDATE_PXE_LEGACY,
- UP_FW_UPDATE_ISCSI_LEGACY,
- UP_FW_UPDATE_CLP_EFI,
- UP_FW_UPDATE_PXE_EFI,
-
- UP_FW_UPDATE_ISCSI_EFI = 0x14,
- UP_FW_UPDATE_CFG,
- UP_FW_UPDATE_BOOT,
- UP_FW_UPDATE_VPD,
- FILE_TYPE_TOTAL_NUM
-};
-
-#define _IMAGE_UP_ALL_IN ((1 << UP_FW_UPDATE_UP_TEXT_A) | \
- (1 << UP_FW_UPDATE_UP_DATA_A) | \
- (1 << UP_FW_UPDATE_UP_TEXT_B) | \
- (1 << UP_FW_UPDATE_UP_DATA_B) | \
- (1 << UP_FW_UPDATE_UP_DICT) | \
- (1 << UP_FW_UPDATE_BOOT) | \
- (1 << UP_FW_UPDATE_HLINK_ONE) | \
- (1 << UP_FW_UPDATE_HLINK_TWO) | \
- (1 << UP_FW_UPDATE_HLINK_THR))
-
-#define _IMAGE_UCODE_ALL_IN ((1 << UP_FW_UPDATE_TILE_TEXT) | \
- (1 << UP_FW_UPDATE_TILE_DICT) | \
- (1 << UP_FW_UPDATE_PPE_STATE) | \
- (1 << UP_FW_UPDATE_PPE_BRANCH) | \
- (1 << UP_FW_UPDATE_PPE_EXTACT))
-
-#define _IMAGE_COLD_SUB_MODULES_MUST_IN (_IMAGE_UP_ALL_IN | _IMAGE_UCODE_ALL_IN)
-#define _IMAGE_HOT_SUB_MODULES_MUST_IN (_IMAGE_UP_ALL_IN | _IMAGE_UCODE_ALL_IN)
-#define _IMAGE_CFG_SUB_MODULES_MUST_IN BIT(UP_FW_UPDATE_CFG)
-#define UP_FW_UPDATE_UP_TEXT 0x0
-#define UP_FW_UPDATE_UP_DATA 0x1
-#define UP_FW_UPDATE_VPD_B 0x15
-
-struct fw_section_info_st {
- u32 fw_section_len;
- u32 fw_section_offset;
- u32 fw_section_version;
- u32 fw_section_type;
- u32 fw_section_crc;
-};
-
-struct fw_image_st {
- u32 fw_version;
- u32 fw_len;
- u32 fw_magic;
- struct {
- u32 fw_section_cnt:16;
- u32 resd:16;
- } fw_info;
- struct fw_section_info_st fw_section_info[MAX_FW_TYPE_NUM];
- u32 device_id;
- u32 res[101];
- void *bin_data;
-};
-
-struct host_image_st {
- struct fw_section_info_st image_section_info[MAX_FW_TYPE_NUM];
- struct {
- u32 up_total_len;
- u32 fw_version;
- } image_info;
- u32 section_type_num;
- u32 device_id;
-};
-
-struct devlink *hinic_devlink_alloc(struct device *dev);
-void hinic_devlink_free(struct devlink *devlink);
-void hinic_devlink_register(struct hinic_devlink_priv *priv);
-void hinic_devlink_unregister(struct hinic_devlink_priv *priv);
-
-int hinic_health_reporters_create(struct hinic_devlink_priv *priv);
-void hinic_health_reporters_destroy(struct hinic_devlink_priv *priv);
-
-#endif /* __HINIC_DEVLINK_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h b/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h
new file mode 100644
index 000000000000..9a89eed2ddfe
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __HINIC_DFX_DEF_H__
+#define __HINIC_DFX_DEF_H__
+
+enum module_name {
+ SEND_TO_NIC_DRIVER = 1,
+ SEND_TO_HW_DRIVER,
+ SEND_TO_UCODE,
+ SEND_TO_UP,
+ SEND_TO_SM,
+
+ HINICADM_OVS_DRIVER = 6,
+ HINICADM_ROCE_DRIVER,
+ HINICADM_TOE_DRIVER,
+ HINICADM_IWAP_DRIVER,
+ HINICADM_FC_DRIVER,
+ HINICADM_FCOE_DRIVER,
+};
+
+enum driver_cmd_type {
+ TX_INFO = 1,
+ Q_NUM,
+ TX_WQE_INFO,
+ TX_MAPPING,
+ RX_INFO,
+ RX_WQE_INFO,
+ RX_CQE_INFO,
+ UPRINT_FUNC_EN,
+ UPRINT_FUNC_RESET,
+ UPRINT_SET_PATH,
+ UPRINT_GET_STATISTICS,
+ FUNC_TYPE,
+ GET_FUNC_IDX,
+ GET_INTER_NUM,
+ CLOSE_TX_STREAM,
+ GET_DRV_VERSION,
+ CLEAR_FUNC_STASTIC,
+ GET_HW_STATS,
+ CLEAR_HW_STATS,
+ GET_SELF_TEST_RES,
+ GET_CHIP_FAULT_STATS,
+ GET_NUM_COS,
+ SET_COS_UP_MAP,
+ GET_COS_UP_MAP,
+ GET_CHIP_ID,
+ GET_SINGLE_CARD_INFO,
+ GET_FIRMWARE_ACTIVE_STATUS,
+ ROCE_DFX_FUNC,
+ GET_DEVICE_ID,
+ GET_PF_DEV_INFO,
+ CMD_FREE_MEM,
+ GET_LOOPBACK_MODE = 32,
+ SET_LOOPBACK_MODE,
+ SET_LINK_MODE,
+ SET_PF_BW_LIMIT,
+ GET_PF_BW_LIMIT,
+ ROCE_CMD,
+ GET_POLL_WEIGHT,
+ SET_POLL_WEIGHT,
+ GET_HOMOLOGUE,
+ SET_HOMOLOGUE,
+ GET_SSET_COUNT,
+ GET_SSET_ITEMS,
+ IS_DRV_IN_VM,
+ LRO_ADPT_MGMT,
+ SET_INTER_COAL_PARAM,
+ GET_INTER_COAL_PARAM,
+ GET_CHIP_INFO,
+ GET_NIC_STATS_LEN,
+ GET_NIC_STATS_STRING,
+ GET_NIC_STATS_INFO,
+ GET_PF_ID,
+ SET_DCB_CFG,
+ SET_PFC_PRIORITY,
+ GET_PFC_INFO,
+ SET_PFC_CONTROL,
+ SET_ETS,
+ GET_ETS_INFO,
+ GET_SUPPORT_UP,
+ GET_SUPPORT_TC,
+
+ RSS_CFG = 0x40,
+ RSS_INDIR,
+ PORT_ID,
+
+ GET_WIN_STAT = 0x60,
+ WIN_CSR_READ = 0x61,
+ WIN_CSR_WRITE = 0x62,
+ WIN_API_CMD_RD = 0x63,
+
+ GET_NICTOOL_CAP = 0x74,
+
+ VM_COMPAT_TEST = 0xFF
+};
+
+enum hinic_nic_link_mode {
+ HINIC_LINK_MODE_AUTO = 0,
+ HINIC_LINK_MODE_UP,
+ HINIC_LINK_MODE_DOWN,
+ HINIC_LINK_MODE_MAX
+};
+
+enum api_chain_cmd_type {
+ API_CSR_READ,
+ API_CSR_WRITE,
+ API_CSR_READ_8B,
+ API_CSR_WRITE_8B,
+};
+
+enum sm_cmd_type {
+ SM_CTR_RD32 = 1,
+ SM_CTR_RD64_PAIR,
+ SM_CTR_RD64
+};
+
+enum hinic_show_set {
+ HINIC_SHOW_SSET_IO_STATS = 1,
+};
+
+#define HINIC_SHOW_ITEM_LEN 32
+struct hinic_show_item {
+ char name[HINIC_SHOW_ITEM_LEN];
+	u8 hexadecimal; /* 0: decimal, 1: hexadecimal */
+ u8 rsvd[7];
+ u64 value;
+};
+
+#define UP_UPDATEFW_TIME_OUT_VAL 20000U
+#define UCODE_COMP_TIME_OUT_VAL 0xFF00000
+#define NIC_TOOL_MAGIC 'x'
+
+enum hinic_nictool_drv_cap {
+ NICTOOL_SUPPORT_API_CSR = 0x1,
+};
+
+#endif /* __HINIC_DFX_DEF_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c
new file mode 100644
index 000000000000..f2186200f6f7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c
@@ -0,0 +1,1462 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_hwif.h"
+#include "hinic_csr.h"
+#include "hinic_eqs.h"
+
+#define HINIC_EQS_WQ_NAME "hinic_eqs"
+
+#define AEQ_CTRL_0_INTR_IDX_SHIFT 0
+#define AEQ_CTRL_0_FUNC_BUSY_SHIFT 10
+#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12
+#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20
+#define AEQ_CTRL_0_QPS_NUM_SHIFT 22
+#define AEQ_CTRL_0_INTR_MODE_SHIFT 31
+
+#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU
+#define AEQ_CTRL_0_FUNC_BUSY_MASK 0x1U
+#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU
+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U
+#define AEQ_CTRL_0_QPS_NUM_MASK 0xFFU
+#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U
+
+#define AEQ_CTRL_0_GET(val, member) \
+ (((val) >> AEQ_CTRL_0_##member##_SHIFT) & \
+ AEQ_CTRL_0_##member##_MASK)
+
+#define AEQ_CTRL_0_SET(val, member) \
+ (((val) & AEQ_CTRL_0_##member##_MASK) << \
+ AEQ_CTRL_0_##member##_SHIFT)
+
+#define AEQ_CTRL_0_CLEAR(val, member) \
+ ((val) & (~(AEQ_CTRL_0_##member##_MASK \
+ << AEQ_CTRL_0_##member##_SHIFT)))
+
+#define AEQ_CTRL_1_LEN_SHIFT 0
+#define AEQ_CTRL_1_FUNC_OWN_SHIFT 21
+#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
+#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
+
+#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU
+#define AEQ_CTRL_1_FUNC_OWN_MASK 0x1U
+#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U
+#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU
+
+#define AEQ_CTRL_1_GET(val, member) \
+ (((val) >> AEQ_CTRL_1_##member##_SHIFT) & \
+ AEQ_CTRL_1_##member##_MASK)
+
+#define AEQ_CTRL_1_SET(val, member) \
+ (((val) & AEQ_CTRL_1_##member##_MASK) << \
+ AEQ_CTRL_1_##member##_SHIFT)
+
+#define AEQ_CTRL_1_CLEAR(val, member) \
+ ((val) & (~(AEQ_CTRL_1_##member##_MASK \
+ << AEQ_CTRL_1_##member##_SHIFT)))
+
+#define HINIC_EQ_PROD_IDX_MASK 0xFFFFF
+#define HINIC_TASK_PROCESS_EQE_LIMIT 1024
+#define HINIC_EQ_UPDATE_CI_STEP 64
+
+static uint g_aeq_len = HINIC_DEFAULT_AEQ_LEN;
+module_param(g_aeq_len, uint, 0444);
+MODULE_PARM_DESC(g_aeq_len,
+ "aeq depth, valid range is " __stringify(HINIC_MIN_AEQ_LEN)
+ " - " __stringify(HINIC_MAX_AEQ_LEN));
+
+static uint g_ceq_len = HINIC_DEFAULT_CEQ_LEN;
+module_param(g_ceq_len, uint, 0444);
+MODULE_PARM_DESC(g_ceq_len,
+ "ceq depth, valid range is " __stringify(HINIC_MIN_CEQ_LEN)
+ " - " __stringify(HINIC_MAX_CEQ_LEN));
+
+static uint g_num_ceqe_in_tasklet = HINIC_TASK_PROCESS_EQE_LIMIT;
+module_param(g_num_ceqe_in_tasklet, uint, 0444);
+MODULE_PARM_DESC(g_num_ceqe_in_tasklet,
+ "The max number of ceqe can be processed in tasklet, default = 1024");
+
+#define CEQ_CTRL_0_INTR_IDX_SHIFT 0
+#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12
+#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20
+#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24
+#define CEQ_CTRL_0_INTR_MODE_SHIFT 31
+
+#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU
+#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU
+#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU
+#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U
+#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U
+
+#define CEQ_CTRL_0_SET(val, member) \
+ (((val) & CEQ_CTRL_0_##member##_MASK) << \
+ CEQ_CTRL_0_##member##_SHIFT)
+
+#define CEQ_CTRL_1_LEN_SHIFT 0
+#define CEQ_CTRL_1_PAGE_SIZE_SHIFT 28
+
+#define CEQ_CTRL_1_LEN_MASK 0x1FFFFFU
+#define CEQ_CTRL_1_PAGE_SIZE_MASK 0xFU
+
+#define CEQ_CTRL_1_SET(val, member) \
+ (((val) & CEQ_CTRL_1_##member##_MASK) << \
+ CEQ_CTRL_1_##member##_SHIFT)
+
+#define EQ_ELEM_DESC_TYPE_SHIFT 0
+#define EQ_ELEM_DESC_SRC_SHIFT 7
+#define EQ_ELEM_DESC_SIZE_SHIFT 8
+#define EQ_ELEM_DESC_WRAPPED_SHIFT 31
+
+#define EQ_ELEM_DESC_TYPE_MASK 0x7FU
+#define EQ_ELEM_DESC_SRC_MASK 0x1U
+#define EQ_ELEM_DESC_SIZE_MASK 0xFFU
+#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U
+
+#define EQ_ELEM_DESC_GET(val, member) \
+ (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \
+ EQ_ELEM_DESC_##member##_MASK)
+
+#define EQ_CONS_IDX_CONS_IDX_SHIFT 0
+#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24
+#define EQ_CONS_IDX_INT_ARMED_SHIFT 31
+
+#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU
+#define EQ_CONS_IDX_XOR_CHKSUM_MASK 0xFU
+#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U
+
+#define EQ_CONS_IDX_SET(val, member) \
+ (((val) & EQ_CONS_IDX_##member##_MASK) << \
+ EQ_CONS_IDX_##member##_SHIFT)
+
+#define EQ_CONS_IDX_CLEAR(val, member) \
+ ((val) & (~(EQ_CONS_IDX_##member##_MASK \
+ << EQ_CONS_IDX_##member##_SHIFT)))
+
+#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT)
+
+#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \
+ ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT))
+
+#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
+ HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
+ HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
+
+#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
+ HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
+ HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
+
+#define GET_EQ_NUM_PAGES(eq, size) \
+ ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \
+ (size)) / (size)))
+
+#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size)
+
+#define GET_EQ_ELEMENT(eq, idx) \
+ (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \
+ (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
+
+#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *)\
+ GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx)
+
+#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx)
+
+#define PAGE_IN_4K(page_size) ((page_size) >> 12)
+#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \
+ ((u32)ilog2(PAGE_IN_4K((eq)->page_size)))
+
+#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
+#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))
+
+#define AEQ_DMA_ATTR_DEFAULT 0
+#define CEQ_DMA_ATTR_DEFAULT 0
+
+#define CEQ_LMT_KICK_DEFAULT 0
+
+#define EQ_MSIX_RESEND_TIMER_CLEAR 1
+
+#define EQ_WRAPPED_SHIFT 20
+
+#define EQ_VALID_SHIFT 31
+
+#define CEQE_TYPE_SHIFT 23
+#define CEQE_TYPE_MASK 0x7
+
+#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \
+ CEQE_TYPE_MASK)
+
+#define CEQE_DATA_MASK 0x3FFFFFF
+#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK)
+
+#define EQ_MIN_PAGE_SIZE 0x1000U
+#define aeq_to_aeqs(eq) \
+ container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
+
+#define ceq_to_ceqs(eq) \
+ container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
+
+static irqreturn_t aeq_interrupt(int irq, void *data);
+static irqreturn_t ceq_interrupt(int irq, void *data);
+
+/**
+ * hinic_qps_num_set - set the number of queues that are actually opened,
+ * and instructs the migration driver to migrate specified queues
+ * during VF live migration.
+ *
+ * @hwdev: the pointer to hw device
+ * @num_qps: number of queue
+ */
+void hinic_qps_num_set(void *hwdev, u32 num_qps)
+{
+ struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+ u32 addr, val, ctrl;
+
+ addr = HINIC_CSR_AEQ_CTRL_0_ADDR(0);
+ val = hinic_hwif_read_reg(hwif, addr);
+ val = AEQ_CTRL_0_CLEAR(val, QPS_NUM);
+ ctrl = AEQ_CTRL_0_SET(num_qps, QPS_NUM);
+ val |= ctrl;
+ hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/* Read the FUNC_BUSY field from the AEQ0 ctrl0 register. */
+u32 hinic_func_busy_state_get(struct hinic_hwdev *hwdev)
+{
+	u32 reg_val;
+
+	reg_val = hinic_hwif_read_reg(hwdev->hwif,
+				      HINIC_CSR_AEQ_CTRL_0_ADDR(0));
+
+	return AEQ_CTRL_0_GET(reg_val, FUNC_BUSY);
+}
+
+/* Write @cfg into the FUNC_BUSY field of the AEQ0 ctrl0 register,
+ * leaving every other field untouched.
+ */
+void hinic_func_busy_state_set(struct hinic_hwdev *hwdev, u32 cfg)
+{
+	u32 reg_addr = HINIC_CSR_AEQ_CTRL_0_ADDR(0);
+	u32 reg_val;
+
+	reg_val = hinic_hwif_read_reg(hwdev->hwif, reg_addr);
+	reg_val = AEQ_CTRL_0_CLEAR(reg_val, FUNC_BUSY);
+	reg_val |= AEQ_CTRL_0_SET(cfg, FUNC_BUSY);
+	hinic_hwif_write_reg(hwdev->hwif, reg_addr, reg_val);
+}
+
+/* Read the FUNC_OWN field from the AEQ0 ctrl1 register. */
+u32 hinic_func_own_bit_get(struct hinic_hwdev *hwdev)
+{
+	u32 reg_val;
+
+	reg_val = hinic_hwif_read_reg(hwdev->hwif,
+				      HINIC_CSR_AEQ_CTRL_1_ADDR(0));
+
+	return AEQ_CTRL_1_GET(reg_val, FUNC_OWN);
+}
+
+/* Write @cfg into the FUNC_OWN field of the AEQ0 ctrl1 register,
+ * leaving every other field untouched.
+ */
+void hinic_func_own_bit_set(struct hinic_hwdev *hwdev, u32 cfg)
+{
+	u32 reg_addr = HINIC_CSR_AEQ_CTRL_1_ADDR(0);
+	u32 reg_val;
+
+	reg_val = hinic_hwif_read_reg(hwdev->hwif, reg_addr);
+	reg_val = AEQ_CTRL_1_CLEAR(reg_val, FUNC_OWN);
+	reg_val |= AEQ_CTRL_1_SET(cfg, FUNC_OWN);
+	hinic_hwif_write_reg(hwdev->hwif, reg_addr, reg_val);
+}
+
+static void ceq_tasklet(ulong eq_tasklet);
+
+/* Fold the 32-bit consumer-index register value into the 4-bit XOR
+ * checksum the hardware expects (XOR of all eight nibbles of @val).
+ */
+static u8 eq_cons_idx_checksum_set(u32 val)
+{
+	u8 xor_sum = 0;
+
+	while (val) {
+		xor_sum ^= (u8)(val & 0xF);
+		val >>= 4;
+	}
+
+	return xor_sum & 0xF;
+}
+
+/**
+ * hinic_aeq_register_hw_cb - register aeq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ * @hwe_cb: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_aeq_register_hw_cb(void *hwdev, enum hinic_aeq_type event,
+			     hinic_aeq_hwe_cb hwe_cb)
+{
+	struct hinic_aeqs *aeqs;
+
+	if (!hwdev || !hwe_cb || event >= HINIC_MAX_AEQ_EVENTS)
+		return -EINVAL;
+
+	aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+	aeqs->aeq_hwe_cb[event] = hwe_cb;
+
+	/* publish the registered flag only after the pointer is in place */
+	set_bit(HINIC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_aeq_register_hw_cb);
+
+/**
+ * hinic_aeq_unregister_hw_cb - unregister the aeq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ */
+void hinic_aeq_unregister_hw_cb(void *hwdev, enum hinic_aeq_type event)
+{
+	struct hinic_aeqs *aeqs;
+
+	if (!hwdev || event >= HINIC_MAX_AEQ_EVENTS)
+		return;
+
+	aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+	/* drop the registered flag, then wait for any in-flight handler
+	 * to finish before clearing the callback pointer
+	 */
+	clear_bit(HINIC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]);
+
+	while (test_bit(HINIC_AEQ_HW_CB_RUNNING,
+			&aeqs->aeq_hw_cb_state[event]))
+		usleep_range(900, 1000);
+
+	aeqs->aeq_hwe_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic_aeq_unregister_hw_cb);
+
+/**
+ * hinic_aeq_register_swe_cb - register aeq callback for sw event
+ * @hwdev: pointer to hw device
+ * @event: soft event for the handler
+ * @aeq_swe_cb: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_aeq_register_swe_cb(void *hwdev, enum hinic_aeq_sw_type event,
+			      hinic_aeq_swe_cb aeq_swe_cb)
+{
+	struct hinic_aeqs *aeqs;
+
+	if (!hwdev || !aeq_swe_cb || event >= HINIC_MAX_AEQ_SW_EVENTS)
+		return -EINVAL;
+
+	aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+	aeqs->aeq_swe_cb[event] = aeq_swe_cb;
+
+	/* publish the registered flag only after the pointer is in place */
+	set_bit(HINIC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_aeq_register_swe_cb);
+
+/**
+ * hinic_aeq_unregister_swe_cb - unregister the aeq callback for sw event
+ * @hwdev: pointer to hw device
+ * @event: soft event for the handler
+ */
+void hinic_aeq_unregister_swe_cb(void *hwdev, enum hinic_aeq_sw_type event)
+{
+	struct hinic_aeqs *aeqs;
+
+	if (!hwdev || event >= HINIC_MAX_AEQ_SW_EVENTS)
+		return;
+
+	aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+	clear_bit(HINIC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]);
+
+	/* wait for any handler still running on this event before the
+	 * callback pointer is cleared
+	 */
+	while (test_bit(HINIC_AEQ_SW_CB_RUNNING, &aeqs->aeq_sw_cb_state[event]))
+		usleep_range(900, 1000);
+
+	aeqs->aeq_swe_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic_aeq_unregister_swe_cb);
+
+/**
+ * hinic_ceq_register_cb - register ceq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_ceq_register_cb(void *hwdev, enum hinic_ceq_event event,
+			  hinic_ceq_event_cb callback)
+{
+	struct hinic_ceqs *ceqs;
+
+	if (!hwdev || event >= HINIC_MAX_CEQ_EVENTS)
+		return -EINVAL;
+
+	ceqs = ((struct hinic_hwdev *)hwdev)->ceqs;
+
+	ceqs->ceq_cb[event] = callback;
+
+	/* publish the registered flag only after the pointer is in place */
+	set_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_ceq_register_cb);
+
+/**
+ * hinic_ceq_unregister_cb - unregister ceq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ */
+void hinic_ceq_unregister_cb(void *hwdev, enum hinic_ceq_event event)
+{
+	struct hinic_ceqs *ceqs;
+
+	if (!hwdev || event >= HINIC_MAX_CEQ_EVENTS)
+		return;
+
+	ceqs = ((struct hinic_hwdev *)hwdev)->ceqs;
+
+	/* drop the registered flag, then wait for any in-flight handler
+	 * to finish before clearing the callback pointer
+	 */
+	clear_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);
+
+	while (test_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]))
+		usleep_range(900, 1000);
+
+	ceqs->ceq_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic_ceq_unregister_cb);
+
+/**
+ * set_eq_cons_idx - write the cons idx to the hw
+ * @eq: The event queue to update the cons idx for
+ * @arm_state: arm state value
+ */
+static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)
+{
+	u32 eq_wrap_ci, val;
+	u32 addr = EQ_CONS_IDX_REG_ADDR(eq);
+
+	eq_wrap_ci = EQ_CONS_IDX(eq);
+
+	/* other fields are reserved, set to 0 */
+	val = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+	      EQ_CONS_IDX_SET(arm_state, INT_ARMED);
+
+	/* the register carries a XOR checksum over its own contents */
+	val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
+
+	hinic_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+/**
+ * ceq_event_handler - dispatch one ceq element to its registered callback
+ * @ceqs: eqs part of the chip
+ * @ceqe: ceq element of the event
+ */
+static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
+{
+	struct hinic_hwdev *hwdev = ceqs->hwdev;
+	enum hinic_ceq_event event = CEQE_TYPE(ceqe);
+	u32 ceqe_data = CEQE_DATA(ceqe);
+
+	if (event >= HINIC_MAX_CEQ_EVENTS) {
+		sdk_err(hwdev->dev_hdl, "Ceq unknown event: %d, ceqe data: 0x%x\n",
+			event, ceqe_data);
+		return;
+	}
+
+	/* mark the callback as running so unregister can wait for it */
+	set_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+
+	if (ceqs->ceq_cb[event] &&
+	    test_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]))
+		ceqs->ceq_cb[event](hwdev, ceqe_data);
+
+	clear_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+}
+
+/**
+ * aeq_irq_handler - handler for the aeq event
+ * @eq: the async event queue of the event
+ * Return: true - the processing limit was reached and more elements may be
+ * pending, false - the queue was drained
+ */
+static bool aeq_irq_handler(struct hinic_eq *eq)
+{
+	struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
+	struct hinic_aeq_elem *aeqe_pos;
+	enum hinic_aeq_type event;
+	enum hinic_aeq_sw_type sw_event;
+	enum hinic_ucode_event_type ucode_event;
+	u64 aeqe_data;
+	u32 aeqe_desc;
+	u32 i, eqe_cnt = 0;
+	u8 size;
+
+	for (i = 0; i < HINIC_TASK_PROCESS_EQE_LIMIT; i++) {
+		aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+
+		/* Data in HW is in Big endian Format */
+		aeqe_desc = be32_to_cpu(aeqe_pos->desc);
+
+		/* HW updates wrapped bit, when it adds eq element event */
+		if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
+			return false;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the cmdq wqe until we have
+		 * verified the command has been processed and
+		 * written back.
+		 */
+		dma_rmb();
+
+		event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
+		if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
+			ucode_event = event;
+			/* SW event uses only the first 8B */
+			sw_event = ucode_event >= HINIC_NIC_FATAL_ERROR_MAX ?
+				   HINIC_STATEFULL_EVENT :
+				   HINIC_STATELESS_EVENT;
+			aeqe_data = be64_to_cpu((*(u64 *)aeqe_pos->aeqe_data));
+			set_bit(HINIC_AEQ_SW_CB_RUNNING,
+				&aeqs->aeq_sw_cb_state[sw_event]);
+			/* the callback's return value (fault level) was
+			 * previously stored in an unused local; it is
+			 * intentionally discarded here
+			 */
+			if (aeqs->aeq_swe_cb[sw_event] &&
+			    test_bit(HINIC_AEQ_SW_CB_REG,
+				     &aeqs->aeq_sw_cb_state[sw_event]))
+				aeqs->aeq_swe_cb[sw_event](aeqs->hwdev,
+							   ucode_event,
+							   aeqe_data);
+			clear_bit(HINIC_AEQ_SW_CB_RUNNING,
+				  &aeqs->aeq_sw_cb_state[sw_event]);
+		} else {
+			if (event < HINIC_MAX_AEQ_EVENTS) {
+				size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
+				set_bit(HINIC_AEQ_HW_CB_RUNNING,
+					&aeqs->aeq_hw_cb_state[event]);
+				if (aeqs->aeq_hwe_cb[event] &&
+				    test_bit(HINIC_AEQ_HW_CB_REG,
+					     &aeqs->aeq_hw_cb_state[event]))
+					aeqs->aeq_hwe_cb[event](aeqs->hwdev,
+						aeqe_pos->aeqe_data, size);
+				clear_bit(HINIC_AEQ_HW_CB_RUNNING,
+					  &aeqs->aeq_hw_cb_state[event]);
+			} else {
+				sdk_warn(eq->hwdev->dev_hdl,
+					 "Unknown aeq hw event %d\n", event);
+			}
+		}
+
+		eq->cons_idx++;
+
+		if (eq->cons_idx == eq->eq_len) {
+			eq->cons_idx = 0;
+			eq->wrapped = !eq->wrapped;
+		}
+
+		/* update the hw consumer index periodically so the ring
+		 * does not appear full to hardware
+		 */
+		if (++eqe_cnt >= HINIC_EQ_UPDATE_CI_STEP) {
+			eqe_cnt = 0;
+			set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
+		}
+	}
+
+	return true;
+}
+
+/**
+ * ceq_irq_handler - handler for the ceq event
+ * @eq: the completion event queue of the event
+ * Return: true - success, false - failure
+ */
+static bool ceq_irq_handler(struct hinic_eq *eq)
+{
+	struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
+	u32 ceqe, eqe_cnt = 0;
+	u32 i;
+
+	for (i = 0; i < g_num_ceqe_in_tasklet; i++) {
+		/* each ceq element is a single big-endian 32-bit word */
+		ceqe = *(GET_CURR_CEQ_ELEM(eq));
+		ceqe = be32_to_cpu(ceqe);
+
+		/* HW updates wrapped bit, when it adds eq element event */
+		if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
+			return false;
+
+		ceq_event_handler(ceqs, ceqe);
+
+		eq->cons_idx++;
+
+		/* wrap around and toggle the expected wrapped bit */
+		if (eq->cons_idx == eq->eq_len) {
+			eq->cons_idx = 0;
+			eq->wrapped = !eq->wrapped;
+		}
+
+		/* update the hw consumer index periodically */
+		if (++eqe_cnt >= HINIC_EQ_UPDATE_CI_STEP) {
+			eqe_cnt = 0;
+			set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
+		}
+	}
+
+	return true;
+}
+
+/* Requeue the deferred handler for @eq: AEQs are serviced from a
+ * workqueue, CEQs from a tasklet.
+ */
+static void reschedule_eq_handler(struct hinic_eq *eq)
+{
+	if (eq->type != HINIC_AEQ) {
+		tasklet_schedule(&eq->ceq_tasklet);
+		return;
+	}
+
+	queue_work(aeq_to_aeqs(eq)->workq, &eq->aeq_work);
+}
+
+/* Validate @eq_id against the configured queue count and requeue the
+ * matching eq's deferred handler.
+ */
+int hinic_reschedule_eq(struct hinic_hwdev *hwdev, enum hinic_eq_type type,
+			u16 eq_id)
+{
+	struct hinic_eq *eq;
+
+	if (type == HINIC_AEQ) {
+		if (eq_id >= hwdev->aeqs->num_aeqs)
+			return -EINVAL;
+		eq = &hwdev->aeqs->aeq[eq_id];
+	} else {
+		if (eq_id >= hwdev->ceqs->num_ceqs)
+			return -EINVAL;
+		eq = &hwdev->ceqs->ceq[eq_id];
+	}
+
+	reschedule_eq_handler(eq);
+
+	return 0;
+}
+
+/**
+ * eq_irq_handler - handler for the eq event
+ * @data: the event queue of the event
+ * Return: true - more elements may be pending, false - queue drained
+ */
+static bool eq_irq_handler(void *data)
+{
+	struct hinic_eq *eq = data;
+	bool pending;
+
+	pending = (eq->type == HINIC_AEQ) ?
+		  aeq_irq_handler(eq) : ceq_irq_handler(eq);
+
+	/* re-arm the interrupt only when the queue was fully drained */
+	set_eq_cons_idx(eq, pending ? HINIC_EQ_NOT_ARMED : HINIC_EQ_ARMED);
+
+	return pending;
+}
+
+/* Look up the eq (aeq first, then ceq) bound to @msix_entry_idx,
+ * or NULL when no queue uses that msix entry.
+ */
+static struct hinic_eq *find_eq(struct hinic_hwdev *hwdev, int msix_entry_idx)
+{
+	struct hinic_aeqs *aeqs = hwdev->aeqs;
+	struct hinic_ceqs *ceqs = hwdev->ceqs;
+	struct hinic_eq *eq;
+	int i;
+
+	for (i = 0; i < aeqs->num_aeqs; i++) {
+		eq = &aeqs->aeq[i];
+		if (eq->eq_irq.msix_entry_idx == msix_entry_idx)
+			return eq;
+	}
+
+	for (i = 0; i < ceqs->num_ceqs; i++) {
+		eq = &ceqs->ceq[i];
+		if (eq->eq_irq.msix_entry_idx == msix_entry_idx)
+			return eq;
+	}
+
+	return NULL;
+}
+
+/* for windows */
+bool hinic_eq_intr_handler(void *hwdev, int msix_entry_idx)
+{
+	struct hinic_eq *eq = find_eq(hwdev, msix_entry_idx);
+
+	if (!eq) {
+		pr_err("Can't find eq in eq interrupt handler\n");
+		return false;
+	}
+
+	return eq_irq_handler(eq);
+}
+
+/**
+ * eq_irq_work - eq work for the event
+ * @work: the work that is associated with the eq
+ */
+static void eq_irq_work(struct work_struct *work)
+{
+	struct hinic_eq *eq = container_of(work, struct hinic_eq, aeq_work);
+
+	/* requeue ourselves when the handler hit its processing limit */
+	if (eq_irq_handler(eq))
+		reschedule_eq_handler(eq);
+}
+
+/**
+ * aeq_interrupt - aeq interrupt handler
+ * @irq: irq number
+ * @data: the async event queue of the event
+ */
+static irqreturn_t aeq_interrupt(int irq, void *data)
+{
+	struct hinic_eq *aeq = (struct hinic_eq *)data;
+	struct hinic_hwdev *hwdev = aeq->hwdev;
+	struct hinic_aeqs *aeqs = aeq_to_aeqs(aeq);
+
+	/* clear resend timer cnt register */
+	hinic_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx,
+					 EQ_MSIX_RESEND_TIMER_CLEAR);
+
+	/* actual element processing is deferred to eq_irq_work */
+	queue_work(aeqs->workq, &aeq->aeq_work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ceq_tasklet - ceq tasklet for the event
+ * @ceq_data: data that will be used by the tasklet(ceq)
+ */
+static void ceq_tasklet(ulong ceq_data)
+{
+	struct hinic_eq *eq = (struct hinic_eq *)ceq_data;
+
+	/* record last soft-interrupt service time for the debug dump */
+	eq->soft_intr_jif = jiffies;
+
+	/* reschedule ourselves when the handler hit its processing limit */
+	if (eq_irq_handler(eq))
+		reschedule_eq_handler(eq);
+}
+
+/**
+ * ceq_interrupt - ceq interrupt handler
+ * @irq: irq number
+ * @data: the completion event queue of the event
+ */
+static irqreturn_t ceq_interrupt(int irq, void *data)
+{
+	struct hinic_eq *ceq = (struct hinic_eq *)data;
+
+	/* record last hard-interrupt time for the debug dump */
+	ceq->hard_intr_jif = jiffies;
+
+	/* clear resend timer counters */
+	hinic_misx_intr_clear_resend_bit(ceq->hwdev, ceq->eq_irq.msix_entry_idx,
+					 EQ_MSIX_RESEND_TIMER_CLEAR);
+
+	/* actual element processing is deferred to ceq_tasklet */
+	tasklet_schedule(&ceq->ceq_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/* Message payload used by set_ceq_ctrl_reg() to ask the management cpu
+ * (HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP) to write a ceq's ctrl registers.
+ */
+struct hinic_ceq_ctrl_reg {
+	u8 status;	/* completion status returned by mgmt cpu */
+	u8 version;
+	u8 rsvd0[6];
+
+	u16 func_id;	/* global function id of the requester */
+	u16 q_id;	/* ceq index to configure */
+	u32 ctrl0;
+	u32 ctrl1;
+};
+
+/* Write a ceq's ctrl0/ctrl1 registers indirectly through the management
+ * cpu (VFs and PFs cannot write them directly).
+ * Return: 0 - success, negative - failure
+ */
+static int set_ceq_ctrl_reg(struct hinic_hwdev *hwdev, u16 q_id,
+			    u32 ctrl0, u32 ctrl1)
+{
+	struct hinic_ceq_ctrl_reg ceq_ctrl = {0};
+	u16 in_size = sizeof(ceq_ctrl);
+	u16 out_size = sizeof(ceq_ctrl);
+	int err;
+
+	err = hinic_global_func_id_get(hwdev, &ceq_ctrl.func_id);
+	if (err)
+		return err;
+
+	ceq_ctrl.q_id = q_id;
+	ceq_ctrl.ctrl0 = ctrl0;
+	ceq_ctrl.ctrl1 = ctrl1;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP,
+				     &ceq_ctrl, in_size,
+				     &ceq_ctrl, &out_size, 0);
+	/* a zero out_size or non-zero status means the mgmt cpu rejected it */
+	if (err || !out_size || ceq_ctrl.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
+			q_id, err, ceq_ctrl.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * set_eq_ctrls - setting eq's ctrls registers
+ * @eq: the event queue for setting
+ * Return: 0 - success, negative - failure
+ */
+static int set_eq_ctrls(struct hinic_eq *eq)
+{
+	enum hinic_eq_type type = eq->type;
+	struct hinic_hwif *hwif = eq->hwdev->hwif;
+	struct irq_info *eq_irq = &eq->eq_irq;
+	u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;
+	u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif);
+	int err;
+
+	if (type == HINIC_AEQ) {
+		/* set ctrl0 */
+		addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+
+		val = hinic_hwif_read_reg(hwif, addr);
+
+		val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &
+		      AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
+		      AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
+		      AEQ_CTRL_0_CLEAR(val, INTR_MODE);
+
+		if (HINIC_IS_VF(eq->hwdev)) {
+			/* NOTE(review): FUNC_OWN is a ctrl1 field, yet its
+			 * clear mask is applied to the ctrl0 value here -
+			 * confirm against the AEQ_CTRL_* macro definitions
+			 * that combining the two masks is intended.
+			 */
+			val = AEQ_CTRL_0_CLEAR(val, FUNC_BUSY) &
+			      AEQ_CTRL_1_CLEAR(val, FUNC_OWN);
+		}
+
+		ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+			AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR)	|
+			AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)	|
+			AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
+		val |= ctrl0;
+
+		hinic_hwif_write_reg(hwif, addr, val);
+
+		/* set ctrl1 */
+		addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+
+		/* hw takes page size and element size as log2 values */
+		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+		elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
+
+		ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN)		|
+			AEQ_CTRL_1_SET(elem_size, ELEM_SIZE)	|
+			AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+		hinic_hwif_write_reg(hwif, addr, ctrl1);
+
+	} else {
+		ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+			CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR)	|
+			CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) |
+			CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)	|
+			CEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
+
+		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+
+		ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) |
+			CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+		/* set ceq ctrl reg through mgmt cpu */
+		err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/**
+ * ceq_elements_init - Initialize all the elements in the ceq
+ * @eq: the event queue
+ * @init_val: value to init with it the elements
+ */
+static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+	u32 be_val = cpu_to_be32(init_val);
+	u32 idx;
+
+	for (idx = 0; idx < eq->eq_len; idx++)
+		*(GET_CEQ_ELEM(eq, idx)) = be_val;
+
+	wmb();	/* Write the init values */
+}
+
+/**
+ * aeq_elements_init - initialize all the elements in the aeq
+ * @eq: the event queue
+ * @init_val: value to init with it the elements
+ */
+static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+	u32 be_val = cpu_to_be32(init_val);
+	u32 idx;
+
+	for (idx = 0; idx < eq->eq_len; idx++)
+		GET_AEQ_ELEM(eq, idx)->desc = be_val;
+
+	wmb();	/* Write the init values */
+}
+
+/**
+ * alloc_eq_pages - allocate the pages for the queue
+ * @eq: the event queue
+ *
+ * Two address arrays are kept per queue: *_for_free holds the addresses
+ * exactly as returned by dma_alloc_coherent (used to free them later),
+ * while dma_addr/virt_addr hold the possibly re-aligned addresses that
+ * are programmed into hardware.
+ */
+static int alloc_eq_pages(struct hinic_eq *eq)
+{
+	struct hinic_hwif *hwif = eq->hwdev->hwif;
+	u32 init_val;
+	u64 dma_addr_size, virt_addr_size;
+	u16 pg_num, i;
+	u32 reg;
+	int err;
+	u8 flag = 0;	/* set when any page had to be re-aligned */
+
+	dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr);
+	virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr);
+
+	eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL);
+	if (!eq->dma_addr)
+		return -ENOMEM;
+
+	eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL);
+	if (!eq->virt_addr) {
+		err = -ENOMEM;
+		goto virt_addr_alloc_err;
+	}
+
+	eq->dma_addr_for_free = kzalloc(dma_addr_size, GFP_KERNEL);
+	if (!eq->dma_addr_for_free) {
+		err = -ENOMEM;
+		goto dma_addr_free_alloc_err;
+	}
+
+	eq->virt_addr_for_free = kzalloc(virt_addr_size, GFP_KERNEL);
+	if (!eq->virt_addr_for_free) {
+		err = -ENOMEM;
+		goto virt_addr_free_alloc_err;
+	}
+
+	for (pg_num = 0; pg_num < eq->num_pages; pg_num++) {
+		eq->virt_addr_for_free[pg_num] = dma_alloc_coherent
+			(eq->hwdev->dev_hdl, eq->page_size,
+			&eq->dma_addr_for_free[pg_num], GFP_KERNEL);
+		if (!eq->virt_addr_for_free[pg_num]) {
+			err = -ENOMEM;
+			goto dma_alloc_err;
+		}
+
+		eq->dma_addr[pg_num] = eq->dma_addr_for_free[pg_num];
+		eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num];
+		/* hardware needs page-size-aligned addresses; when the
+		 * allocation is not aligned, round the address up to half
+		 * the page size and use only that half
+		 */
+		if (eq->dma_addr_for_free[pg_num] & (eq->page_size - 1)) {
+			sdk_info(eq->hwdev->dev_hdl,
+				 "Address is not aligned to %u-bytes as hardware required\n",
+				 eq->page_size);
+			sdk_info(eq->hwdev->dev_hdl, "Change eq's page size %u\n",
+				 ((eq->page_size) >> 1));
+			eq->dma_addr[pg_num] = ALIGN
+					(eq->dma_addr_for_free[pg_num],
+					(u64)((eq->page_size) >> 1));
+			eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num] +
+				((u64)eq->dma_addr[pg_num]
+				- (u64)eq->dma_addr_for_free[pg_num]);
+			flag = 1;
+		}
+		reg = HINIC_EQ_HI_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num);
+		hinic_hwif_write_reg(hwif, reg,
+				     upper_32_bits(eq->dma_addr[pg_num]));
+
+		reg = HINIC_EQ_LO_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num);
+		hinic_hwif_write_reg(hwif, reg,
+				     lower_32_bits(eq->dma_addr[pg_num]));
+	}
+
+	/* halving the page size halves the usable queue length too;
+	 * orig_page_size keeps the allocation size for free_eq_pages
+	 */
+	if (flag) {
+		eq->page_size = eq->page_size >> 1;
+		eq->eq_len = eq->eq_len >> 1;
+	}
+
+	eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size);
+	if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
+		sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n");
+		err = -EINVAL;
+		goto dma_alloc_err;
+	}
+	init_val = EQ_WRAPPED(eq);
+
+	if (eq->type == HINIC_AEQ)
+		aeq_elements_init(eq, init_val);
+	else
+		ceq_elements_init(eq, init_val);
+
+	return 0;
+
+dma_alloc_err:
+	for (i = 0; i < pg_num; i++)
+		dma_free_coherent(eq->hwdev->dev_hdl, eq->page_size,
+				  eq->virt_addr_for_free[i],
+				  eq->dma_addr_for_free[i]);
+	kfree(eq->virt_addr_for_free);
+virt_addr_free_alloc_err:
+	kfree(eq->dma_addr_for_free);
+dma_addr_free_alloc_err:
+	kfree(eq->virt_addr);
+virt_addr_alloc_err:
+	kfree(eq->dma_addr);
+	return err;
+}
+
+/**
+ * free_eq_pages - free the pages of the queue
+ * @eq: the event queue
+ */
+static void free_eq_pages(struct hinic_eq *eq)
+{
+	struct hinic_hwdev *hwdev = eq->hwdev;
+	u16 i;
+
+	/* free with orig_page_size: eq->page_size may have been halved
+	 * by alloc_eq_pages to satisfy hardware alignment
+	 */
+	for (i = 0; i < eq->num_pages; i++)
+		dma_free_coherent(hwdev->dev_hdl, eq->orig_page_size,
+				  eq->virt_addr_for_free[i],
+				  eq->dma_addr_for_free[i]);
+
+	kfree(eq->virt_addr_for_free);
+	kfree(eq->dma_addr_for_free);
+	kfree(eq->virt_addr);
+	kfree(eq->dma_addr);
+}
+
+/* Pick the smallest page size (a multiple of EQ_MIN_PAGE_SIZE, power of
+ * two) that fits the whole queue into at most HINIC_EQ_MAX_PAGES pages.
+ */
+static inline u32 get_page_size(struct hinic_eq *eq)
+{
+	u32 total_size;
+	u32 pg_count;
+	u16 order = 0;
+
+	total_size = ALIGN((eq->eq_len * eq->elem_size), EQ_MIN_PAGE_SIZE);
+	if (total_size <= (HINIC_EQ_MAX_PAGES * EQ_MIN_PAGE_SIZE))
+		return EQ_MIN_PAGE_SIZE;
+
+	pg_count = (u16)(ALIGN((total_size / HINIC_EQ_MAX_PAGES),
+			       EQ_MIN_PAGE_SIZE) / EQ_MIN_PAGE_SIZE);
+
+	/* already a power of two */
+	if (!(pg_count & (pg_count - 1)))
+		return EQ_MIN_PAGE_SIZE * pg_count;
+
+	/* otherwise round up to the next power of two */
+	while (pg_count) {
+		pg_count >>= 1;
+		order++;
+	}
+
+	return EQ_MIN_PAGE_SIZE << order;
+}
+
+/**
+ * request_eq_irq - format the eq's irq name and request its interrupt
+ * @eq: the event queue
+ * @type: the type of the event queue, ceq or aeq
+ * @entry: msix entry associated with the event queue
+ * Return: 0 - Success, Negative - failure
+ */
+static int request_eq_irq(struct hinic_eq *eq, enum hinic_eq_type type,
+			  struct irq_info *entry)
+{
+	struct hinic_hwdev *hwdev = eq->hwdev;
+	int err;
+
+	if (type == HINIC_AEQ)
+		err = snprintf(eq->irq_name, sizeof(eq->irq_name),
+			       "hinic_aeq%d@pci:%s", eq->q_id,
+			       pci_name(hwdev->pcidev_hdl));
+	else
+		err = snprintf(eq->irq_name, sizeof(eq->irq_name),
+			       "hinic_ceq%d@pci:%s", eq->q_id,
+			       pci_name(hwdev->pcidev_hdl));
+
+	if (err <= 0 || err >= (int)sizeof(eq->irq_name)) {
+		sdk_err(hwdev->dev_hdl, "Failed snprintf irq_name, function return(%d) and dest_len(%d)\n",
+			err, (int)sizeof(eq->irq_name));
+		return -EINVAL;
+	}
+
+	if (type == HINIC_AEQ)
+		return request_irq(entry->irq_id, aeq_interrupt, 0UL,
+				   eq->irq_name, eq);
+
+	return request_irq(entry->irq_id, ceq_interrupt, 0UL,
+			   eq->irq_name, eq);
+}
+
+/**
+ * init_eq - initialize eq
+ * @eq: the event queue
+ * @hwdev: the pointer to hw device
+ * @q_id: Queue id number
+ * @q_len: the number of EQ elements
+ * @type: the type of the event queue, ceq or aeq
+ * @entry: msix entry associated with the event queue
+ * Return: 0 - Success, Negative - failure
+ */
+static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
+		   u32 q_len, enum hinic_eq_type type, struct irq_info *entry)
+{
+	int err = 0;
+
+	eq->hwdev = hwdev;
+	eq->q_id = q_id;
+	eq->type = type;
+	eq->eq_len = q_len;
+
+	/* clear eq_len to force eqe drop in hardware */
+	if (eq->type == HINIC_AEQ)
+		hinic_hwif_write_reg(eq->hwdev->hwif,
+				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+	else
+		set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+
+	eq->cons_idx = 0;
+	eq->wrapped = 0;
+
+	eq->elem_size = (type == HINIC_AEQ) ?
+			HINIC_AEQE_SIZE : HINIC_CEQE_SIZE;
+
+	eq->page_size = get_page_size(eq);
+	eq->orig_page_size = eq->page_size;
+	eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size);
+	if (eq->num_pages > HINIC_EQ_MAX_PAGES) {
+		sdk_err(hwdev->dev_hdl, "Number pages: %d too many pages for eq\n",
+			eq->num_pages);
+		return -EINVAL;
+	}
+
+	err = alloc_eq_pages(eq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n");
+		return err;
+	}
+
+	eq->eq_irq.msix_entry_idx = entry->msix_entry_idx;
+	eq->eq_irq.irq_id = entry->irq_id;
+
+	err = set_eq_ctrls(eq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to set ctrls for eq\n");
+		goto init_eq_ctrls_err;
+	}
+
+	hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+	set_eq_cons_idx(eq, HINIC_EQ_ARMED);
+
+	if (type == HINIC_AEQ)
+		INIT_WORK(&eq->aeq_work, eq_irq_work);
+	else
+		tasklet_init(&eq->ceq_tasklet, ceq_tasklet, (ulong)eq);
+
+	err = request_eq_irq(eq, type, entry);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to request irq for the eq, err: %d\n",
+			err);
+		goto req_irq_err;
+	}
+
+	hinic_set_msix_state(hwdev, entry->msix_entry_idx, HINIC_MSIX_ENABLE);
+
+	return 0;
+
+init_eq_ctrls_err:
+req_irq_err:
+	free_eq_pages(eq);
+	return err;
+}
+
+/**
+ * remove_eq - remove eq
+ * @eq: the event queue
+ */
+static void remove_eq(struct hinic_eq *eq)
+{
+	struct irq_info *entry = &eq->eq_irq;
+
+	/* mask the msix vector and drain any in-flight interrupt before
+	 * freeing the irq
+	 */
+	hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,
+			     HINIC_MSIX_DISABLE);
+	synchronize_irq(entry->irq_id);
+
+	free_irq(entry->irq_id, eq);
+
+	if (eq->type == HINIC_AEQ) {
+		cancel_work_sync(&eq->aeq_work);
+
+		/* clear eq_len to avoid hw access host memory */
+		hinic_hwif_write_reg(eq->hwdev->hwif,
+				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+	} else {
+		tasklet_kill(&eq->ceq_tasklet);
+
+		set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+	}
+
+	/* update cons_idx to avoid invalid interrupt */
+	eq->cons_idx = hinic_hwif_read_reg(eq->hwdev->hwif,
+					   EQ_PROD_IDX_REG_ADDR(eq));
+	set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
+
+	free_eq_pages(eq);
+}
+
+/**
+ * hinic_aeqs_init - init all the aeqs
+ * @hwdev: the pointer to hw device
+ * @num_aeqs: number of AEQs
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ */
+int hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
+		    struct irq_info *msix_entries)
+{
+	struct hinic_aeqs *aeqs;
+	int err;
+	u16 i, q_id;
+	u32 aeq_len;
+
+	aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+	if (!aeqs)
+		return -ENOMEM;
+
+	hwdev->aeqs = aeqs;
+	aeqs->hwdev = hwdev;
+	aeqs->num_aeqs = num_aeqs;
+
+	aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
+	if (!aeqs->workq) {
+		sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n");
+		err = -ENOMEM;
+		goto create_work_err;
+	}
+
+	/* sanitize the module parameter before it is used */
+	if (g_aeq_len < HINIC_MIN_AEQ_LEN || g_aeq_len > HINIC_MAX_AEQ_LEN) {
+		sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %d out of range, resetting to %d\n",
+			 g_aeq_len, HINIC_DEFAULT_AEQ_LEN);
+		g_aeq_len = HINIC_DEFAULT_AEQ_LEN;
+	}
+
+	/* VFs behind a virtual mgmt gateway use a much smaller queue */
+	if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF &&
+	    hwdev->hwif->chip_mode != CHIP_MODE_NORMAL)
+		aeq_len = HINIC_VMGW_DEFAULT_AEQ_LEN;
+	else
+		aeq_len = g_aeq_len;
+
+	for (q_id = 0; q_id < num_aeqs; q_id++) {
+		err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, aeq_len,
+			      HINIC_AEQ, &msix_entries[q_id]);
+		if (err) {
+			sdk_err(hwdev->dev_hdl, "Failed to init aeq %d\n",
+				q_id);
+			goto init_aeq_err;
+		}
+	}
+
+	return 0;
+
+init_aeq_err:
+	/* tear down only the queues that were successfully initialized */
+	for (i = 0; i < q_id; i++)
+		remove_eq(&aeqs->aeq[i]);
+
+	destroy_workqueue(aeqs->workq);
+
+create_work_err:
+	kfree(aeqs);
+
+	return err;
+}
+
+/**
+ * hinic_aeqs_free - free all the aeqs
+ * @hwdev: the pointer to hw device
+ */
+void hinic_aeqs_free(struct hinic_hwdev *hwdev)
+{
+	struct hinic_aeqs *aeqs = hwdev->aeqs;
+	enum hinic_aeq_sw_type sw_event;
+	enum hinic_aeq_type hw_event;
+	u16 q_id;
+
+	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
+		remove_eq(&aeqs->aeq[q_id]);
+
+	/* detach every remaining sw and hw callback */
+	for (sw_event = HINIC_STATELESS_EVENT;
+	     sw_event < HINIC_MAX_AEQ_SW_EVENTS; sw_event++)
+		hinic_aeq_unregister_swe_cb(hwdev, sw_event);
+
+	for (hw_event = HINIC_HW_INTER_INT;
+	     hw_event < HINIC_MAX_AEQ_EVENTS; hw_event++)
+		hinic_aeq_unregister_hw_cb(hwdev, hw_event);
+
+	destroy_workqueue(aeqs->workq);
+
+	kfree(aeqs);
+}
+
+/**
+ * hinic_ceqs_init - init all the ceqs
+ * @hwdev: the pointer to hw device
+ * @num_ceqs: number of CEQs
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ */
+int hinic_ceqs_init(struct hinic_hwdev *hwdev, u16 num_ceqs,
+		    struct irq_info *msix_entries)
+{
+	struct hinic_ceqs *ceqs;
+	int err;
+	u16 i, q_id;
+	u32 ceq_len;
+
+	ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL);
+	if (!ceqs)
+		return -ENOMEM;
+
+	hwdev->ceqs = ceqs;
+	ceqs->hwdev = hwdev;
+	ceqs->num_ceqs = num_ceqs;
+
+	/* sanitize the module parameters before they are used */
+	if (g_ceq_len < HINIC_MIN_CEQ_LEN || g_ceq_len > HINIC_MAX_CEQ_LEN) {
+		sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %d out of range, resetting to %d\n",
+			 g_ceq_len, HINIC_DEFAULT_CEQ_LEN);
+		g_ceq_len = HINIC_DEFAULT_CEQ_LEN;
+	}
+
+	/* VFs behind a virtual mgmt gateway use a much smaller queue */
+	if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF &&
+	    hwdev->hwif->chip_mode != CHIP_MODE_NORMAL)
+		ceq_len = HINIC_VMGW_DEFAULT_CEQ_LEN;
+	else
+		ceq_len = g_ceq_len;
+
+	if (!g_num_ceqe_in_tasklet) {
+		sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be zero, resetting to %d\n",
+			 HINIC_TASK_PROCESS_EQE_LIMIT);
+		g_num_ceqe_in_tasklet = HINIC_TASK_PROCESS_EQE_LIMIT;
+	}
+
+	for (q_id = 0; q_id < num_ceqs; q_id++) {
+		err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, ceq_len,
+			      HINIC_CEQ, &msix_entries[q_id]);
+		if (err) {
+			sdk_err(hwdev->dev_hdl, "Failed to init ceq %d\n",
+				q_id);
+			goto init_ceq_err;
+		}
+	}
+
+	return 0;
+
+init_ceq_err:
+	/* tear down only the queues that were successfully initialized */
+	for (i = 0; i < q_id; i++)
+		remove_eq(&ceqs->ceq[i]);
+
+	kfree(ceqs);
+
+	return err;
+}
+
+/**
+ * hinic_ceqs_free - free all the ceqs
+ * @hwdev: the pointer to hw device
+ */
+void hinic_ceqs_free(struct hinic_hwdev *hwdev)
+{
+	struct hinic_ceqs *ceqs = hwdev->ceqs;
+	enum hinic_ceq_event event;
+	u16 q_id;
+
+	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
+		remove_eq(&ceqs->ceq[q_id]);
+
+	/* detach every remaining ceq callback */
+	for (event = HINIC_CMDQ; event < HINIC_MAX_CEQ_EVENTS; event++)
+		hinic_ceq_unregister_cb(hwdev, event);
+
+	kfree(ceqs);
+}
+
+/* Copy out the irq/msix binding of every ceq and report the ceq count. */
+void hinic_get_ceq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs,
+			u16 *num_irqs)
+{
+	struct hinic_ceqs *ceqs = hwdev->ceqs;
+	u16 i;
+
+	for (i = 0; i < ceqs->num_ceqs; i++) {
+		irqs[i].irq_id = ceqs->ceq[i].eq_irq.irq_id;
+		irqs[i].msix_entry_idx = ceqs->ceq[i].eq_irq.msix_entry_idx;
+	}
+
+	*num_irqs = ceqs->num_ceqs;
+}
+
+/* Copy out the irq/msix binding of every aeq and report the aeq count. */
+void hinic_get_aeq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs,
+			u16 *num_irqs)
+{
+	struct hinic_aeqs *aeqs = hwdev->aeqs;
+	u16 i;
+
+	for (i = 0; i < aeqs->num_aeqs; i++) {
+		irqs[i].irq_id = aeqs->aeq[i].eq_irq.irq_id;
+		irqs[i].msix_entry_idx = aeqs->aeq[i].eq_irq.msix_entry_idx;
+	}
+
+	*num_irqs = aeqs->num_aeqs;
+}
+
+/* Log the hw consumer/producer indices and sw state of every aeq,
+ * used when diagnosing stuck async event processing.
+ */
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
+{
+	struct hinic_aeq_elem *aeqe_pos;
+	struct hinic_eq *eq;
+	u32 addr, ci, pi;
+	int q_id;
+
+	for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
+		eq = &hwdev->aeqs->aeq[q_id];
+		addr = EQ_CONS_IDX_REG_ADDR(eq);
+		ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+		addr = EQ_PROD_IDX_REG_ADDR(eq);
+		pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+		aeqe_pos = GET_CURR_AEQ_ELEM(eq);
+		sdk_err(hwdev->dev_hdl, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n",
+			q_id, ci, pi, work_busy(&eq->aeq_work),
+			eq->wrapped, be32_to_cpu(aeqe_pos->desc));
+	}
+}
+
+/* Log the hw/sw indices, tasklet state and last service times of every
+ * ceq, used when diagnosing stuck completion event processing.
+ */
+void hinic_dump_ceq_info(struct hinic_hwdev *hwdev)
+{
+	struct hinic_eq *eq;
+	u32 addr, ci, pi;
+	int q_id;
+
+	for (q_id = 0; q_id < hwdev->ceqs->num_ceqs; q_id++) {
+		eq = &hwdev->ceqs->ceq[q_id];
+		addr = EQ_CONS_IDX_REG_ADDR(eq);
+		ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+		addr = EQ_PROD_IDX_REG_ADDR(eq);
+		pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+		sdk_err(hwdev->dev_hdl, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n",
+			q_id, ci, eq->cons_idx, pi,
+			tasklet_state(&eq->ceq_tasklet),
+			eq->wrapped, be32_to_cpu(*(GET_CURR_CEQ_ELEM(eq))));
+		sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n",
+			jiffies_to_msecs(jiffies - eq->hard_intr_jif));
+		sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n",
+			jiffies_to_msecs(jiffies - eq->soft_intr_jif));
+	}
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h
new file mode 100644
index 000000000000..81c34ca573d4
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0*/
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_EQS_H
+#define HINIC_EQS_H /* guard defined first, before any nested include */
+
+#include <linux/interrupt.h>
+
+#define HINIC_EQ_PAGE_SIZE 0x00001000
+
+#define HINIC_HW_MAX_AEQS 4
+#define HINIC_MAX_AEQS 3
+#define HINIC_MAX_CEQS 32
+
+#define HINIC_EQ_MAX_PAGES 8
+
+#define HINIC_AEQE_SIZE 64
+#define HINIC_CEQE_SIZE 4
+
+#define HINIC_AEQE_DESC_SIZE 4
+#define HINIC_AEQE_DATA_SIZE \
+ (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)
+
+#define HINIC_DEFAULT_AEQ_LEN 0x10000
+#define HINIC_DEFAULT_CEQ_LEN 0x10000
+
+#define HINIC_VMGW_DEFAULT_AEQ_LEN 128
+#define HINIC_VMGW_DEFAULT_CEQ_LEN 1024
+
+#define HINIC_MIN_AEQ_LEN 64
+#define HINIC_MAX_AEQ_LEN (512 * 1024)
+#define HINIC_MIN_CEQ_LEN 64
+#define HINIC_MAX_CEQ_LEN (1024 * 1024)
+
+#define HINIC_CEQ_ID_CMDQ 0
+
+#define EQ_IRQ_NAME_LEN 64
+
+enum hinic_eq_type {
+ HINIC_AEQ,
+ HINIC_CEQ
+};
+
+enum hinic_eq_intr_mode {
+ HINIC_INTR_MODE_ARMED,
+ HINIC_INTR_MODE_ALWAYS,
+};
+
+enum hinic_eq_ci_arm_state {
+ HINIC_EQ_NOT_ARMED,
+ HINIC_EQ_ARMED,
+};
+
+struct hinic_eq {
+ struct hinic_hwdev *hwdev;
+ u16 q_id; /* index of this EQ within its aeq[]/ceq[] array */
+ enum hinic_eq_type type; /* HINIC_AEQ or HINIC_CEQ */
+ u32 page_size;
+ u32 orig_page_size;
+ u32 eq_len;
+
+ u32 cons_idx; /* software consumer index (dumped as sw_ci) */
+ u16 wrapped; /* wrap flag printed by the dump helpers */
+
+ u16 elem_size; /* element size in bytes; see HINIC_AEQE/CEQE_SIZE */
+ u16 num_pages;
+ u32 num_elem_in_pg;
+
+ struct irq_info eq_irq; /* irq_id + msix_entry_idx for this EQ */
+ char irq_name[EQ_IRQ_NAME_LEN];
+
+ dma_addr_t *dma_addr;
+ u8 **virt_addr;
+ dma_addr_t *dma_addr_for_free; /* original (unaligned) addresses kept for free */
+ u8 **virt_addr_for_free;
+
+ struct work_struct aeq_work; /* used when type == HINIC_AEQ */
+ struct tasklet_struct ceq_tasklet; /* used when type == HINIC_CEQ */
+
+ u64 hard_intr_jif; /* jiffies of last hard interrupt, for dump output */
+ u64 soft_intr_jif; /* jiffies of last soft interrupt, for dump output */
+};
+
+struct hinic_aeq_elem {
+ u8 aeqe_data[HINIC_AEQE_DATA_SIZE];
+ u32 desc;
+};
+
+enum hinic_aeq_cb_state {
+ HINIC_AEQ_HW_CB_REG = 0,
+ HINIC_AEQ_HW_CB_RUNNING,
+ HINIC_AEQ_SW_CB_REG,
+ HINIC_AEQ_SW_CB_RUNNING,
+};
+
+struct hinic_aeqs {
+ struct hinic_hwdev *hwdev;
+
+ hinic_aeq_hwe_cb aeq_hwe_cb[HINIC_MAX_AEQ_EVENTS];
+ hinic_aeq_swe_cb aeq_swe_cb[HINIC_MAX_AEQ_SW_EVENTS];
+ unsigned long aeq_hw_cb_state[HINIC_MAX_AEQ_EVENTS];
+ unsigned long aeq_sw_cb_state[HINIC_MAX_AEQ_SW_EVENTS];
+
+ struct hinic_eq aeq[HINIC_MAX_AEQS];
+ u16 num_aeqs;
+
+ struct workqueue_struct *workq;
+};
+
+enum hinic_ceq_cb_state {
+ HINIC_CEQ_CB_REG = 0,
+ HINIC_CEQ_CB_RUNNING,
+};
+
+struct hinic_ceqs {
+ struct hinic_hwdev *hwdev;
+
+ hinic_ceq_event_cb ceq_cb[HINIC_MAX_CEQ_EVENTS];
+ void *ceq_data[HINIC_MAX_CEQ_EVENTS];
+ unsigned long ceq_cb_state[HINIC_MAX_CEQ_EVENTS];
+
+ struct hinic_eq ceq[HINIC_MAX_CEQS];
+ u16 num_ceqs;
+};
+
+enum hinic_msg_pipe_state {
+ PIPE_STATE_IDLE,
+ PIPE_STATE_BUSY,
+ PIPE_STATE_SUSPEND,
+};
+
+#define PIPE_CYCLE_MAX 10000
+
+u32 hinic_func_busy_state_get(struct hinic_hwdev *hwdev);
+
+void hinic_func_busy_state_set(struct hinic_hwdev *hwdev, u32 cfg);
+
+u32 hinic_func_own_bit_get(struct hinic_hwdev *hwdev);
+
+void hinic_func_own_bit_set(struct hinic_hwdev *hwdev, u32 cfg);
+
+int hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
+ struct irq_info *msix_entries);
+
+void hinic_aeqs_free(struct hinic_hwdev *hwdev);
+
+int hinic_ceqs_init(struct hinic_hwdev *hwdev, u16 num_ceqs,
+ struct irq_info *msix_entries);
+
+void hinic_ceqs_free(struct hinic_hwdev *hwdev);
+
+void hinic_get_ceq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs,
+ u16 *num_irqs);
+
+void hinic_get_aeq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs,
+ u16 *num_irqs);
+
+void hinic_dump_ceq_info(struct hinic_hwdev *hwdev);
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);
+
+int hinic_reschedule_eq(struct hinic_hwdev *hwdev, enum hinic_eq_type type,
+ u16 eq_id);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index f4b680286911..dae540b075d4 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -12,6 +12,7 @@
* for more details.
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -25,58 +26,341 @@
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
-#include <linux/sfp.h>
-#include "hinic_hw_qp.h"
-#include "hinic_hw_dev.h"
-#include "hinic_port.h"
+#include "ossl_knl.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic_dev.h"
+#include "hinic_dfx_def.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
-#include "hinic_dev.h"
+#include "hinic_qp.h"
-#define SET_LINK_STR_MAX_LEN 16
+#ifndef SET_ETHTOOL_OPS
+#define SET_ETHTOOL_OPS(netdev, ops) \
+ ((netdev)->ethtool_ops = (ops))
+#endif
-#define GET_SUPPORTED_MODE 0
-#define GET_ADVERTISED_MODE 1
+struct hinic_stats {
+ char name[ETH_GSTRING_LEN];
+ u32 size;
+ int offset;
+};
-#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \
- ((ecmd)->supported |= \
- (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
-#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \
- ((ecmd)->advertising |= \
- (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
-#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \
- ((ecmd)->supported |= SUPPORTED_##mode)
-#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \
- ((ecmd)->advertising |= ADVERTISED_##mode)
+#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))
-#define COALESCE_PENDING_LIMIT_UNIT 8
-#define COALESCE_TIMER_CFG_UNIT 9
-#define COALESCE_ALL_QUEUE 0xFFFF
-#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT)
-#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT)
+#define HINIC_NETDEV_STAT(_stat_item) { \
+ .name = #_stat_item, \
+ .size = sizeof_field(struct rtnl_link_stats64, _stat_item), \
+ .offset = offsetof(struct rtnl_link_stats64, _stat_item) \
+}
-struct hw2ethtool_link_mode {
- enum ethtool_link_mode_bit_indices link_mode_bit;
- u32 speed;
- enum hinic_link_mode hw_link_mode;
+static struct hinic_stats hinic_netdev_stats[] = {
+ HINIC_NETDEV_STAT(rx_packets),
+ HINIC_NETDEV_STAT(tx_packets),
+ HINIC_NETDEV_STAT(rx_bytes),
+ HINIC_NETDEV_STAT(tx_bytes),
+ HINIC_NETDEV_STAT(rx_errors),
+ HINIC_NETDEV_STAT(tx_errors),
+ HINIC_NETDEV_STAT(rx_dropped),
+ HINIC_NETDEV_STAT(tx_dropped),
+ HINIC_NETDEV_STAT(multicast),
+ HINIC_NETDEV_STAT(collisions),
+ HINIC_NETDEV_STAT(rx_length_errors),
+ HINIC_NETDEV_STAT(rx_over_errors),
+ HINIC_NETDEV_STAT(rx_crc_errors),
+ HINIC_NETDEV_STAT(rx_frame_errors),
+ HINIC_NETDEV_STAT(rx_fifo_errors),
+ HINIC_NETDEV_STAT(rx_missed_errors),
+ HINIC_NETDEV_STAT(tx_aborted_errors),
+ HINIC_NETDEV_STAT(tx_carrier_errors),
+ HINIC_NETDEV_STAT(tx_fifo_errors),
+ HINIC_NETDEV_STAT(tx_heartbeat_errors),
};
-struct cmd_link_settings {
- u64 supported;
- u64 advertising;
+#define HINIC_NIC_STAT(_stat_item) { \
+ .name = #_stat_item, \
+ .size = sizeof_field(struct hinic_nic_stats, _stat_item), \
+ .offset = offsetof(struct hinic_nic_stats, _stat_item) \
+}
- u32 speed;
- u8 duplex;
- u8 port;
- u8 autoneg;
+static struct hinic_stats hinic_nic_dev_stats[] = {
+ HINIC_NIC_STAT(netdev_tx_timeout),
};
-static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
- SPEED_10, SPEED_100,
- SPEED_1000, SPEED_10000,
- SPEED_25000, SPEED_40000,
- SPEED_100000
+static struct hinic_stats hinic_nic_dev_stats_extern[] = {
+ HINIC_NIC_STAT(tx_carrier_off_drop),
+ HINIC_NIC_STAT(tx_invalid_qid),
+};
+
+#define HINIC_RXQ_STAT(_stat_item) { \
+ .name = "rxq%d_"#_stat_item, \
+ .size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
+ .offset = offsetof(struct hinic_rxq_stats, _stat_item) \
+}
+
+#define HINIC_TXQ_STAT(_stat_item) { \
+ .name = "txq%d_"#_stat_item, \
+ .size = sizeof_field(struct hinic_txq_stats, _stat_item), \
+ .offset = offsetof(struct hinic_txq_stats, _stat_item) \
+}
+
+/*lint -save -e786*/
+static struct hinic_stats hinic_rx_queue_stats[] = {
+ HINIC_RXQ_STAT(packets),
+ HINIC_RXQ_STAT(bytes),
+ HINIC_RXQ_STAT(errors),
+ HINIC_RXQ_STAT(csum_errors),
+ HINIC_RXQ_STAT(other_errors),
+ HINIC_RXQ_STAT(dropped),
+ HINIC_RXQ_STAT(rx_buf_empty),
+};
+
+static struct hinic_stats hinic_rx_queue_stats_extern[] = {
+ HINIC_RXQ_STAT(alloc_skb_err),
+ HINIC_RXQ_STAT(alloc_rx_buf_err),
+ HINIC_RXQ_STAT(map_rx_buf_err),
+};
+
+static struct hinic_stats hinic_tx_queue_stats[] = {
+ HINIC_TXQ_STAT(packets),
+ HINIC_TXQ_STAT(bytes),
+ HINIC_TXQ_STAT(busy),
+ HINIC_TXQ_STAT(wake),
+ HINIC_TXQ_STAT(dropped),
+ HINIC_TXQ_STAT(big_frags_pkts),
+ HINIC_TXQ_STAT(big_udp_pkts),
+};
+
+static struct hinic_stats hinic_tx_queue_stats_extern[] = {
+ HINIC_TXQ_STAT(ufo_pkt_unsupport),
+ HINIC_TXQ_STAT(ufo_linearize_err),
+ HINIC_TXQ_STAT(ufo_alloc_skb_err),
+ HINIC_TXQ_STAT(skb_pad_err),
+ HINIC_TXQ_STAT(frag_len_overflow),
+ HINIC_TXQ_STAT(offload_cow_skb_err),
+ HINIC_TXQ_STAT(alloc_cpy_frag_err),
+ HINIC_TXQ_STAT(map_cpy_frag_err),
+ HINIC_TXQ_STAT(map_frag_err),
+ HINIC_TXQ_STAT(frag_size_err),
+ HINIC_TXQ_STAT(unknown_tunnel_pkt),
+};/*lint -restore*/
+
+#define HINIC_FUNC_STAT(_stat_item) { \
+ .name = #_stat_item, \
+ .size = sizeof_field(struct hinic_vport_stats, _stat_item), \
+ .offset = offsetof(struct hinic_vport_stats, _stat_item) \
+}
+
+static struct hinic_stats hinic_function_stats[] = {
+ HINIC_FUNC_STAT(tx_unicast_pkts_vport),
+ HINIC_FUNC_STAT(tx_unicast_bytes_vport),
+ HINIC_FUNC_STAT(tx_multicast_pkts_vport),
+ HINIC_FUNC_STAT(tx_multicast_bytes_vport),
+ HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
+ HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
+
+ HINIC_FUNC_STAT(rx_unicast_pkts_vport),
+ HINIC_FUNC_STAT(rx_unicast_bytes_vport),
+ HINIC_FUNC_STAT(rx_multicast_pkts_vport),
+ HINIC_FUNC_STAT(rx_multicast_bytes_vport),
+ HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
+ HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
+
+ HINIC_FUNC_STAT(tx_discard_vport),
+ HINIC_FUNC_STAT(rx_discard_vport),
+ HINIC_FUNC_STAT(tx_err_vport),
+ HINIC_FUNC_STAT(rx_err_vport),
+};
+
+#define HINIC_PORT_STAT(_stat_item) { \
+ .name = #_stat_item, \
+ .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
+ .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
+}
+
+static struct hinic_stats hinic_port_stats[] = {
+ HINIC_PORT_STAT(mac_rx_total_pkt_num),
+ HINIC_PORT_STAT(mac_rx_total_oct_num),
+ HINIC_PORT_STAT(mac_rx_bad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_bad_oct_num),
+ HINIC_PORT_STAT(mac_rx_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_good_oct_num),
+ HINIC_PORT_STAT(mac_rx_uni_pkt_num),
+ HINIC_PORT_STAT(mac_rx_multi_pkt_num),
+ HINIC_PORT_STAT(mac_rx_broad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_total_pkt_num),
+ HINIC_PORT_STAT(mac_tx_total_oct_num),
+ HINIC_PORT_STAT(mac_tx_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_bad_oct_num),
+ HINIC_PORT_STAT(mac_tx_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_good_oct_num),
+ HINIC_PORT_STAT(mac_tx_uni_pkt_num),
+ HINIC_PORT_STAT(mac_tx_multi_pkt_num),
+ HINIC_PORT_STAT(mac_tx_broad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
+ HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
+ HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
+ HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
+ HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pause_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
+ HINIC_PORT_STAT(mac_rx_control_pkt_num),
+ HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
+ HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
+ HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
+ HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
+ HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
+ HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
+ HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pause_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
+ HINIC_PORT_STAT(mac_tx_control_pkt_num),
+ HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
+ HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
+};
+
+u32 hinic_get_io_stats_size(struct hinic_nic_dev *nic_dev)
+{
+ return ARRAY_LEN(hinic_nic_dev_stats) + /* device-level counters */
+ ARRAY_LEN(hinic_nic_dev_stats_extern) +
+ (ARRAY_LEN(hinic_tx_queue_stats) + /* per-queue tx+rx counters, */
+ ARRAY_LEN(hinic_tx_queue_stats_extern) + /* repeated for every qp */
+ ARRAY_LEN(hinic_rx_queue_stats) +
+ ARRAY_LEN(hinic_rx_queue_stats_extern)) * nic_dev->max_qps;
+}
+
+#define GET_VALUE_OF_PTR(size, ptr) ( \
+ (size) == sizeof(u64) ? *(u64 *)(ptr) : \
+ (size) == sizeof(u32) ? *(u32 *)(ptr) : \
+ (size) == sizeof(u16) ? *(u16 *)(ptr) : *(u8 *)(ptr) \
+)
+
+#define DEV_STATS_PACK(items, item_idx, array, stats_ptr) { \
+ int j; \
+ for (j = 0; j < ARRAY_LEN(array); j++) { \
+ /* bounded copy: source name[] may be shorter than dest */ \
+ snprintf((items)[item_idx].name, HINIC_SHOW_ITEM_LEN, \
+ "%s", (array)[j].name); \
+ (items)[item_idx].hexadecimal = 0; \
+ (items)[item_idx].value = \
+ GET_VALUE_OF_PTR((array)[j].size, \
+ (char *)(stats_ptr) + (array)[j].offset); \
+ item_idx++; \
+ } \
+}
+
+#define QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) { \
+ int j, err; \
+ /* snprintf below fully writes name[], so no pre-copy of the */ \
+ /* template string is needed (the old memcpy was dead code) */ \
+ for (j = 0; j < ARRAY_LEN(array); j++) { \
+ err = snprintf((items)[item_idx].name, HINIC_SHOW_ITEM_LEN,\
+ (array)[j].name, (qid)); \
+ if (err <= 0 || err >= HINIC_SHOW_ITEM_LEN) \
+ pr_err("Failed snprintf: func_ret(%d), dest_len(%d)\n",\
+ err, HINIC_SHOW_ITEM_LEN); \
+ (items)[item_idx].hexadecimal = 0; \
+ (items)[item_idx].value = \
+ GET_VALUE_OF_PTR((array)[j].size, \
+ (char *)(stats_ptr) + (array)[j].offset); \
+ item_idx++; \
+ } \
+}
+
+void hinic_get_io_stats(struct hinic_nic_dev *nic_dev,
+ struct hinic_show_item *items)
+{
+ int item_idx = 0; /* running write position into items[] */
+ u16 qid;
+
+ DEV_STATS_PACK(items, item_idx, hinic_nic_dev_stats, &nic_dev->stats);
+ DEV_STATS_PACK(items, item_idx, hinic_nic_dev_stats_extern,
+ &nic_dev->stats);
+
+ for (qid = 0; qid < nic_dev->max_qps; qid++) { /* per-queue tx stats */
+ QUEUE_STATS_PACK(items, item_idx, hinic_tx_queue_stats,
+ &nic_dev->txqs[qid].txq_stats, qid);
+ QUEUE_STATS_PACK(items, item_idx, hinic_tx_queue_stats_extern,
+ &nic_dev->txqs[qid].txq_stats, qid);
+ }
+
+ for (qid = 0; qid < nic_dev->max_qps; qid++) { /* per-queue rx stats */
+ QUEUE_STATS_PACK(items, item_idx, hinic_rx_queue_stats,
+ &nic_dev->rxqs[qid].rxq_stats, qid);
+ QUEUE_STATS_PACK(items, item_idx, hinic_rx_queue_stats_extern,
+ &nic_dev->rxqs[qid].rxq_stats, qid);
+ }
+}
+
+#define LP_DEFAULT_TIME 5 /* seconds */
+#define LP_PKT_LEN 1514
+#define OBJ_STR_MAX_LEN 32
+#define SET_LINK_STR_MAX_LEN 128
+
+#define PORT_DOWN_ERR_IDX 0
+enum diag_test_index {
+ INTERNAL_LP_TEST = 0,
+ EXTERNAL_LP_TEST = 1,
+ DIAG_TEST_MAX = 2,
+};
+
+static char hinic_test_strings[][ETH_GSTRING_LEN] = {
+ "Internal lb test (on/offline)",
+ "External lb test (external_lb)",
+};
+
+struct hw2ethtool_link_mode {
+ enum ethtool_link_mode_bit_indices link_mode_bit;
+ u32 speed;
+ enum hinic_link_mode hw_link_mode;
};
static struct hw2ethtool_link_mode
@@ -133,52 +417,23 @@ static struct hw2ethtool_link_mode
},
};
-#define LP_DEFAULT_TIME 5 /* seconds */
-#define LP_PKT_LEN 1514
-
-#define PORT_DOWN_ERR_IDX 0
-enum diag_test_index {
- INTERNAL_LP_TEST = 0,
- EXTERNAL_LP_TEST = 1,
- DIAG_TEST_MAX = 2,
+u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
+ SPEED_10, SPEED_100,
+ SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000,
+ SPEED_100000
};
-static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
- enum hinic_speed speed)
+static int hinic_ethtool_to_hw_speed_level(u32 speed)
{
- switch (speed) {
- case HINIC_SPEED_10MB_LINK:
- link_ksettings->base.speed = SPEED_10;
- break;
-
- case HINIC_SPEED_100MB_LINK:
- link_ksettings->base.speed = SPEED_100;
- break;
-
- case HINIC_SPEED_1000MB_LINK:
- link_ksettings->base.speed = SPEED_1000;
- break;
-
- case HINIC_SPEED_10GB_LINK:
- link_ksettings->base.speed = SPEED_10000;
- break;
-
- case HINIC_SPEED_25GB_LINK:
- link_ksettings->base.speed = SPEED_25000;
- break;
-
- case HINIC_SPEED_40GB_LINK:
- link_ksettings->base.speed = SPEED_40000;
- break;
-
- case HINIC_SPEED_100GB_LINK:
- link_ksettings->base.speed = SPEED_100000;
- break;
+ int i;
- default:
- link_ksettings->base.speed = SPEED_UNKNOWN;
- break;
+ for (i = 0; i < LINK_SPEED_LEVELS; i++) {
+ if (hw_to_ethtool_speed[i] == speed)
+ break;
}
+
+ return i;
}
static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
@@ -193,29 +448,54 @@ static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
return i;
}
-static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
- enum hinic_link_mode hw_link_mode,
- u32 name)
+static int hinic_is_support_speed(enum hinic_link_mode supported_link,
+ u32 speed)
{
enum hinic_link_mode link_mode;
- int idx = 0;
+ int idx;
for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
- if (hw_link_mode & ((u32)1 << link_mode)) {
- idx = hinic_get_link_mode_index(link_mode);
- if (idx >= HINIC_LINK_MODE_NUMBERS)
- continue;
+ if (!(supported_link & ((u32)1 << link_mode)))
+ continue;
- if (name == GET_SUPPORTED_MODE)
- ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
- (link_settings, idx);
- else
- ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
- (link_settings, idx);
- }
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (hw_to_ethtool_link_mode_table[idx].speed == speed)
+ return 1;
}
+
+ return 0;
}
+#define GET_SUPPORTED_MODE 0
+#define GET_ADVERTISED_MODE 1
+
+struct cmd_link_settings {
+ u64 supported;
+ u64 advertising;
+
+ u32 speed;
+ u8 duplex;
+ u8 port;
+ u8 autoneg;
+};
+
+#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+
+#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= SUPPORTED_##mode)
+#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= ADVERTISED_##mode)
+#define ETHTOOL_TEST_LINK_MODE_SUPPORTED(ecmd, mode) \
+ ((ecmd)->supported & SUPPORTED_##mode) /* was hardcoded to Autoneg, ignoring mode */
+
static void hinic_link_port_type(struct cmd_link_settings *link_settings,
enum hinic_port_type port_type)
{
@@ -252,297 +532,376 @@ static void hinic_link_port_type(struct cmd_link_settings *link_settings,
}
}
-static int hinic_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings
- *link_ksettings)
+static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
+ enum hinic_link_mode hw_link_mode,
+ u32 name)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_link_mode_cmd link_mode = { 0 };
- struct hinic_pause_config pause_info = { 0 };
- struct cmd_link_settings settings = { 0 };
- enum hinic_port_link_state link_state;
- struct hinic_port_cap port_cap;
- int err;
-
- ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
- ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+ enum hinic_link_mode link_mode;
+ int idx = 0;
- link_ksettings->base.speed = SPEED_UNKNOWN;
- link_ksettings->base.autoneg = AUTONEG_DISABLE;
- link_ksettings->base.duplex = DUPLEX_UNKNOWN;
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (hw_link_mode & ((u32)1 << link_mode)) {
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
- err = hinic_port_get_cap(nic_dev, &port_cap);
- if (err)
- return err;
+ if (name == GET_SUPPORTED_MODE)
+ ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
+ (link_settings, idx);
+ else
+ ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
+ (link_settings, idx);
+ }
+ }
+}
- hinic_link_port_type(&settings, port_cap.port_type);
- link_ksettings->base.port = settings.port;
+static int hinic_link_speed_set(struct hinic_nic_dev *nic_dev,
+ struct cmd_link_settings *link_settings,
+ struct nic_port_info *port_info)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ enum hinic_link_mode supported_link = 0, advertised_link = 0;
+ u8 link_state = 0;
+ int err;
- err = hinic_port_link_state(nic_dev, &link_state);
- if (err)
+ err = hinic_get_link_mode(nic_dev->hwdev,
+ &supported_link, &advertised_link);
+ if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
+ advertised_link == HINIC_SUPPORTED_UNKNOWN) {
+ nicif_err(nic_dev, drv, netdev, "Failed to get supported link modes\n");
return err;
-
- if (link_state == HINIC_LINK_STATE_UP) {
- set_link_speed(link_ksettings, port_cap.speed);
- link_ksettings->base.duplex =
- (port_cap.duplex == HINIC_DUPLEX_FULL) ?
- DUPLEX_FULL : DUPLEX_HALF;
}
- if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising, Autoneg);
-
- if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
- link_ksettings->base.autoneg = AUTONEG_ENABLE;
-
- err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
- if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
- link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
- return -EIO;
-
- hinic_add_ethtool_link_mode(&settings, link_mode.supported,
+ hinic_add_ethtool_link_mode(link_settings, supported_link,
GET_SUPPORTED_MODE);
- hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
+ hinic_add_ethtool_link_mode(link_settings, advertised_link,
GET_ADVERTISED_MODE);
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
- err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
- if (err)
- return err;
- ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
- if (pause_info.rx_pause && pause_info.tx_pause) {
- ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
- } else if (pause_info.tx_pause) {
- ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
- } else if (pause_info.rx_pause) {
- ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
- ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
- }
- }
+ err = hinic_get_link_state(nic_dev->hwdev, &link_state);
+ if (!err && link_state) {
+ link_settings->speed = port_info->speed < LINK_SPEED_LEVELS ?
+ hw_to_ethtool_speed[port_info->speed] :
+ (u32)SPEED_UNKNOWN;
- linkmode_copy(link_ksettings->link_modes.supported,
- (unsigned long *)&settings.supported);
- linkmode_copy(link_ksettings->link_modes.advertising,
- (unsigned long *)&settings.advertising);
+ link_settings->duplex = port_info->duplex;
+ } else {
+ link_settings->speed = (u32)SPEED_UNKNOWN;
+ link_settings->duplex = DUPLEX_UNKNOWN;
+ }
return 0;
}
-static int hinic_ethtool_to_hw_speed_level(u32 speed)
+static int get_link_settings(struct net_device *netdev,
+ struct cmd_link_settings *link_settings)
{
- int i;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct nic_port_info port_info = {0};
+ struct nic_pause_config nic_pause = {0};
+ int err;
- for (i = 0; i < LINK_SPEED_LEVELS; i++) {
- if (hw_to_ethtool_speed[i] == speed)
- break;
+ err = hinic_get_port_info(nic_dev->hwdev, &port_info);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to get port info\n");
+ return err;
}
- return i;
-}
+ err = hinic_link_speed_set(nic_dev, link_settings, &port_info);
+ if (err)
+ return err;
-static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
- u32 speed)
-{
- enum hinic_link_mode link_mode;
- int idx;
+ hinic_link_port_type(link_settings, port_info.port_type);
- for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
- if (!(supported_link & ((u32)1 << link_mode)))
- continue;
+ link_settings->autoneg = port_info.autoneg_state;
+ if (port_info.autoneg_cap)
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Autoneg);
+ if (port_info.autoneg_state)
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Autoneg);
- idx = hinic_get_link_mode_index(link_mode);
- if (idx >= HINIC_LINK_MODE_NUMBERS)
- continue;
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+ err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to get pauseparam from hw\n");
+ return err;
+ }
- if (hw_to_ethtool_link_mode_table[idx].speed == speed)
- return true;
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Pause);
+ if (nic_pause.rx_pause && nic_pause.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause);
+ } else if (nic_pause.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings,
+ Asym_Pause);
+ } else if (nic_pause.rx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings,
+ Asym_Pause);
+ }
}
- return false;
+ return 0;
}
-static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
+static int hinic_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
- struct hinic_link_mode_cmd link_mode = { 0 };
- struct net_device *netdev = nic_dev->netdev;
- enum nic_speed_level speed_level = 0;
+ struct cmd_link_settings settings = {0};
+ struct ethtool_link_settings *base = &cmd->base;
int err;
- err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ err = get_link_settings(netdev, &settings);
if (err)
- return false;
+ return err;
- if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
- link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
- return false;
+ bitmap_copy(cmd->link_modes.supported,
+ (unsigned long *)&settings.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_copy(cmd->link_modes.advertising,
+ (unsigned long *)&settings.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
- speed_level = hinic_ethtool_to_hw_speed_level(speed);
- if (speed_level >= LINK_SPEED_LEVELS ||
- !hinic_is_support_speed(link_mode.supported, speed)) {
- netif_err(nic_dev, drv, netdev,
- "Unsupported speed: %d\n", speed);
- return false;
- }
+ base->autoneg = settings.autoneg;
+ base->speed = settings.speed;
+ base->duplex = settings.duplex;
+ base->port = settings.port;
- return true;
+ return 0;
}
-static int get_link_settings_type(struct hinic_dev *nic_dev,
- u8 autoneg, u32 speed, u32 *set_settings)
+static int hinic_is_speed_legal(struct hinic_nic_dev *nic_dev, u32 speed)
{
- struct hinic_port_cap port_cap = { 0 };
+ struct net_device *netdev = nic_dev->netdev;
+ enum hinic_link_mode supported_link = 0, advertised_link = 0;
+ enum nic_speed_level speed_level = 0;
int err;
- err = hinic_port_get_cap(nic_dev, &port_cap);
- if (err)
- return err;
-
- /* always set autonegotiation */
- if (port_cap.autoneg_cap)
- *set_settings |= HILINK_LINK_SET_AUTONEG;
+ err = hinic_get_link_mode(nic_dev->hwdev,
+ &supported_link, &advertised_link);
+ if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
+ advertised_link == HINIC_SUPPORTED_UNKNOWN) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to get supported link modes\n");
+ return -EAGAIN;
+ }
- if (autoneg == AUTONEG_ENABLE) {
- if (!port_cap.autoneg_cap) {
- netif_err(nic_dev, drv, nic_dev->netdev, "Not support autoneg\n");
- return -EOPNOTSUPP;
- }
- } else if (speed != (u32)SPEED_UNKNOWN) {
- /* set speed only when autoneg is disabled */
- if (!hinic_is_speed_legal(nic_dev, speed))
- return -EINVAL;
- *set_settings |= HILINK_LINK_SET_SPEED;
- } else {
- netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
- return -EOPNOTSUPP;
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ if (speed_level >= LINK_SPEED_LEVELS ||
+ !hinic_is_support_speed(supported_link, speed)) {
+ nicif_err(nic_dev, drv, netdev,
+ "Not supported speed: %d\n", speed);
+ return -EINVAL;
}
return 0;
}
-static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
- u32 set_settings, u8 autoneg,
- u32 speed)
+static int hinic_set_settings_to_hw(struct hinic_nic_dev *nic_dev,
+ u32 set_settings, u8 autoneg, u32 speed)
{
+ struct net_device *netdev = nic_dev->netdev;
+ struct hinic_link_ksettings settings = {0};
enum nic_speed_level speed_level = 0;
+ char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
int err = 0;
+ err = snprintf(set_link_str, sizeof(set_link_str), "%s",
+ (set_settings & HILINK_LINK_SET_AUTONEG) ?
+ (autoneg ? "autong enable " : "autong disable ") : "");
+ if (err < 0 || err >= SET_LINK_STR_MAX_LEN) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to snprintf link state, function return(%d) and dest_len(%d)\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+ if (set_settings & HILINK_LINK_SET_SPEED) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ err = snprintf(set_link_str, sizeof(set_link_str),
+ "%sspeed %d ", set_link_str, speed);
+ if (err <= 0 || err >= SET_LINK_STR_MAX_LEN) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+ }
+
+ settings.valid_bitmap = set_settings;
+ settings.autoneg = autoneg;
+ settings.speed = speed_level;
+
+ err = hinic_set_link_settings(nic_dev->hwdev, &settings);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
+ if (err)
+ nicif_err(nic_dev, drv, netdev, "Set %sfailed\n",
+ set_link_str);
+ else
+ nicif_info(nic_dev, drv, netdev, "Set %ssuccess\n",
+ set_link_str);
+
+ return err;
+ }
+
if (set_settings & HILINK_LINK_SET_AUTONEG) {
err = hinic_set_autoneg(nic_dev->hwdev,
(autoneg == AUTONEG_ENABLE));
if (err)
- netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
+ nicif_err(nic_dev, drv, netdev, "%s autoneg failed\n",
(autoneg == AUTONEG_ENABLE) ?
"Enable" : "Disable");
else
- netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
+ nicif_info(nic_dev, drv, netdev, "%s autoneg success\n",
(autoneg == AUTONEG_ENABLE) ?
"Enable" : "Disable");
}
if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
- speed_level = hinic_ethtool_to_hw_speed_level(speed);
err = hinic_set_speed(nic_dev->hwdev, speed_level);
if (err)
- netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
+ nicif_err(nic_dev, drv, netdev, "Set speed %d failed\n",
speed);
else
- netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
+ nicif_info(nic_dev, drv, netdev, "Set speed %d success\n",
speed);
}
return err;
}
-static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
- u32 set_settings, u8 autoneg, u32 speed)
+static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
{
- struct hinic_link_ksettings_info settings = {0};
- char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
- const char *autoneg_str;
- struct net_device *netdev = nic_dev->netdev;
- enum nic_speed_level speed_level = 0;
- int err;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct nic_port_info port_info = {0};
+ u32 set_settings = 0;
+ int err = 0;
- autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ?
- (autoneg ? "autong enable " : "autong disable ") : "";
+ if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+ nicif_err(nic_dev, drv, netdev, "Not support set link settings\n");
+ return -EOPNOTSUPP;
+ }
- if (set_settings & HILINK_LINK_SET_SPEED) {
- speed_level = hinic_ethtool_to_hw_speed_level(speed);
- err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN,
- "speed %d ", speed);
- if (err >= SET_LINK_STR_MAX_LEN) {
- netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
- err, SET_LINK_STR_MAX_LEN);
- return -EFAULT;
- }
+ err = hinic_get_port_info(nic_dev->hwdev, &port_info);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to get current settings\n");
+ return -EAGAIN;
}
- settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
- settings.valid_bitmap = set_settings;
- settings.autoneg = autoneg;
- settings.speed = speed_level;
+	/* Always set autonegotiation */
+ if (port_info.autoneg_cap)
+ set_settings |= HILINK_LINK_SET_AUTONEG;
- err = hinic_set_link_settings(nic_dev->hwdev, &settings);
- if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
+ if (autoneg == AUTONEG_ENABLE) {
+ if (!port_info.autoneg_cap) {
+ nicif_err(nic_dev, drv, netdev, "Not support autoneg\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (speed != (u32)SPEED_UNKNOWN) {
+		/* Set speed only when autoneg is disabled */
+ err = hinic_is_speed_legal(nic_dev, speed);
if (err)
- netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n",
- autoneg_str, set_link_str);
- else
- netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n",
- autoneg_str, set_link_str);
+ return err;
- return err;
+ set_settings |= HILINK_LINK_SET_SPEED;
+ } else {
+ nicif_err(nic_dev, drv, netdev, "Need to set speed when autoneg is off\n");
+ return -EOPNOTSUPP;
}
- return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
- speed);
-}
-
-static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u32 set_settings = 0;
- int err;
-
- err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
- if (err)
- return err;
-
if (set_settings)
err = hinic_set_settings_to_hw(nic_dev, set_settings,
autoneg, speed);
else
- netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n");
+ nicif_info(nic_dev, drv, netdev, "Nothing changed, exiting without setting anything\n");
return err;
}
-static int hinic_set_link_ksettings(struct net_device *netdev, const struct
- ethtool_link_ksettings *link_settings)
+static int hinic_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
- /* only support to set autoneg and speed */
- return set_link_settings(netdev, link_settings->base.autoneg,
- link_settings->base.speed);
+ /* Only support to set autoneg and speed */
+ return set_link_settings(netdev, cmd->base.autoneg,
+ cmd->base.speed);
}
static void hinic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
int err;
strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
- strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));
+ strscpy(info->version, HINIC_DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
- err = hinic_get_mgmt_version(nic_dev, mgmt_ver);
- if (err)
+ err = hinic_get_mgmt_version(nic_dev->hwdev, mgmt_ver);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to get fw version\n");
return;
+ }
+
+ err = snprintf(info->fw_version, sizeof(info->fw_version),
+ "%s", mgmt_ver);
+ if (err <= 0 || err >= (int)sizeof(info->fw_version))
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to snprintf fw_version, function return(%d) and dest_len(%d)\n",
+ err, (int)sizeof(info->fw_version));
+}
+
+static u32 hinic_get_msglevel(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ return nic_dev->msg_enable;
+}
+
+static void hinic_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->msg_enable = data;
+
+ nicif_info(nic_dev, drv, netdev, "Set message level: 0x%x\n", data);
+}
+
+static int hinic_nway_reset(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct nic_port_info port_info = {0};
+ int err;
+
+ if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+ nicif_err(nic_dev, drv, netdev, "Current function don't support to restart autoneg\n");
+ return -EOPNOTSUPP;
+ }
+
+ err = hinic_get_port_info(nic_dev->hwdev, &port_info);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Get autonegotiation state failed\n");
+ return -EFAULT;
+ }
+
+ if (!port_info.autoneg_state) {
+ nicif_err(nic_dev, drv, netdev,
+ "Autonegotiation is off, don't support to restart it\n");
+ return -EINVAL;
+ }
+
+ err = hinic_set_autoneg(nic_dev->hwdev, true);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Restart autonegotiation failed\n");
+ return -EFAULT;
+ }
+
+ nicif_info(nic_dev, drv, netdev, "Restart autonegotiation success\n");
- snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver);
+ return 0;
}
static void hinic_get_ringparam(struct net_device *netdev,
@@ -550,19 +909,40 @@ static void hinic_get_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
- ring->rx_pending = nic_dev->rq_depth;
- ring->tx_pending = nic_dev->sq_depth;
+ ring->rx_pending = nic_dev->rxqs[0].q_depth;
+ ring->tx_pending = nic_dev->txqs[0].q_depth;
+}
+
+static void hinic_update_qp_depth(struct hinic_nic_dev *nic_dev,
+ u16 sq_depth, u16 rq_depth)
+{
+ u16 i;
+
+ nic_dev->sq_depth = sq_depth;
+ nic_dev->rq_depth = rq_depth;
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ nic_dev->txqs[i].q_depth = sq_depth;
+ nic_dev->txqs[i].q_mask = sq_depth - 1;
+ nic_dev->rxqs[i].q_depth = rq_depth;
+ nic_dev->rxqs[i].q_mask = rq_depth - 1;
+ }
}
-static int check_ringparam_valid(struct hinic_dev *nic_dev,
- struct ethtool_ringparam *ring)
+static int hinic_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 new_sq_depth, new_rq_depth;
+ int err;
+
if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
- netif_err(nic_dev, drv, nic_dev->netdev,
+ nicif_err(nic_dev, drv, netdev,
"Unsupported rx_jumbo_pending/rx_mini_pending\n");
return -EINVAL;
}
@@ -571,28 +951,12 @@ static int check_ringparam_valid(struct hinic_dev *nic_dev,
ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
- netif_err(nic_dev, drv, nic_dev->netdev,
+ nicif_err(nic_dev, drv, netdev,
"Queue depth out of range [%d-%d]\n",
HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
return -EINVAL;
}
- return 0;
-}
-
-static int hinic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring,
- struct kernel_ethtool_ringparam *kernel_ring,
- struct netlink_ext_ack *extack)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u16 new_sq_depth, new_rq_depth;
- int err;
-
- err = check_ringparam_valid(nic_dev, ring);
- if (err)
- return err;
-
new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));
@@ -600,26 +964,27 @@ static int hinic_set_ringparam(struct net_device *netdev,
new_rq_depth == nic_dev->rq_depth)
return 0;
- netif_info(nic_dev, drv, netdev,
+ nicif_info(nic_dev, drv, netdev,
"Change Tx/Rx ring depth from %d/%d to %d/%d\n",
nic_dev->sq_depth, nic_dev->rq_depth,
new_sq_depth, new_rq_depth);
- nic_dev->sq_depth = new_sq_depth;
- nic_dev->rq_depth = new_rq_depth;
-
- if (netif_running(netdev)) {
- netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
+ if (!netif_running(netdev)) {
+ hinic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth);
+ } else {
+ nicif_info(nic_dev, drv, netdev, "Restarting netdev\n");
err = hinic_close(netdev);
if (err) {
- netif_err(nic_dev, drv, netdev,
+ nicif_err(nic_dev, drv, netdev,
"Failed to close netdev\n");
return -EFAULT;
}
+ hinic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth);
+
err = hinic_open(netdev);
if (err) {
- netif_err(nic_dev, drv, netdev,
+ nicif_err(nic_dev, drv, netdev,
"Failed to open netdev\n");
return -EFAULT;
}
@@ -628,170 +993,483 @@ static int hinic_set_ringparam(struct net_device *netdev,
return 0;
}
+static u16 hinic_max_channels(struct hinic_nic_dev *nic_dev)
+{
+ u8 tcs = (u8)netdev_get_num_tc(nic_dev->netdev);
+
+ return tcs ? nic_dev->max_qps / tcs : nic_dev->max_qps;
+}
+
+static u16 hinic_curr_channels(struct hinic_nic_dev *nic_dev)
+{
+ if (netif_running(nic_dev->netdev))
+ return nic_dev->num_rss ? nic_dev->num_rss : 1;
+ else
+ return min_t(u16, hinic_max_channels(nic_dev),
+ nic_dev->rss_limit);
+}
+
+static void hinic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ channels->max_rx = 0;
+ channels->max_tx = 0;
+ channels->max_other = 0;
+ channels->max_combined = hinic_max_channels(nic_dev);
+ channels->rx_count = 0;
+ channels->tx_count = 0;
+ channels->other_count = 0;
+ channels->combined_count = hinic_curr_channels(nic_dev);
+}
+
+void hinic_update_num_qps(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 num_qps;
+ u8 tcs;
+
+ /* change num_qps to change counter in ethtool -S */
+ tcs = (u8)netdev_get_num_tc(nic_dev->netdev);
+ num_qps = (u16)(nic_dev->rss_limit * (tcs ? tcs : 1));
+ nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_qps);
+}
+
+static int hinic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned int count = channels->combined_count;
+ int err;
+
+ if (!count) {
+ nicif_err(nic_dev, drv, netdev,
+ "Unsupported combined_count=0\n");
+ return -EINVAL;
+ }
+
+ if (channels->tx_count || channels->rx_count || channels->other_count) {
+ nicif_err(nic_dev, drv, netdev,
+ "Setting rx/tx/other count not supported\n");
+ return -EINVAL;
+ }
+
+ if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, netdev,
+ "This function don't support RSS, only support 1 queue pair\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (count > hinic_max_channels(nic_dev)) {
+ nicif_err(nic_dev, drv, netdev,
+ "Combined count %d exceed limit %d\n",
+ count, hinic_max_channels(nic_dev));
+ return -EINVAL;
+ }
+
+ nicif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
+ nic_dev->rss_limit, count);
+ nic_dev->rss_limit = (u16)count;
+
+ if (netif_running(netdev)) {
+ nicif_info(nic_dev, drv, netdev, "Restarting netdev\n");
+ err = hinic_close(netdev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to close netdev\n");
+ return -EFAULT;
+ }
+ /* Discard user configured rss */
+ hinic_set_default_rss_indir(netdev);
+
+ err = hinic_open(netdev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ return -EFAULT;
+ }
+ } else {
+ /* Discard user configured rss */
+ hinic_set_default_rss_indir(netdev);
+
+ hinic_update_num_qps(netdev);
+ }
+
+ return 0;
+}
+
+static int hinic_get_sset_count(struct net_device *netdev, int sset)
+{
+ int count = 0, q_num = 0;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ switch (sset) {
+ case ETH_SS_TEST:
+ return ARRAY_LEN(hinic_test_strings);
+ case ETH_SS_STATS:
+ q_num = nic_dev->num_qps;
+ count = ARRAY_LEN(hinic_netdev_stats) +
+ ARRAY_LEN(hinic_nic_dev_stats) +
+ (ARRAY_LEN(hinic_tx_queue_stats) +
+ ARRAY_LEN(hinic_rx_queue_stats)) * q_num;
+
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ count += ARRAY_LEN(hinic_function_stats);
+
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) &&
+ FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev))
+ count += ARRAY_LEN(hinic_port_stats);
+
+ return count;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define COALESCE_ALL_QUEUE 0xFFFF
+#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT)
+#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT)
+#define COALESCE_PENDING_LIMIT_UNIT 8
+#define COALESCE_TIMER_CFG_UNIT 9
+
static int __hinic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coal, u16 queue)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_intr_coal_info *rx_intr_coal_info;
- struct hinic_intr_coal_info *tx_intr_coal_info;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_intr_coal_info *interrupt_info;
if (queue == COALESCE_ALL_QUEUE) {
/* get tx/rx irq0 as default parameters */
- rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0];
- tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0];
+ interrupt_info = &nic_dev->intr_coalesce[0];
} else {
if (queue >= nic_dev->num_qps) {
- netif_err(nic_dev, drv, netdev,
+ nicif_err(nic_dev, drv, netdev,
"Invalid queue_id: %d\n", queue);
return -EINVAL;
}
- rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue];
- tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue];
+ interrupt_info = &nic_dev->intr_coalesce[queue];
}
- /* coalesce_timer is in unit of 9us */
- coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg *
+	/* coalesce_timer is in unit of 9us */
+ coal->rx_coalesce_usecs = interrupt_info->coalesce_timer_cfg *
COALESCE_TIMER_CFG_UNIT;
- /* coalesced_frames is in unit of 8 */
- coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt *
+	/* coalesced_frames is in unit of 8 */
+ coal->rx_max_coalesced_frames = interrupt_info->pending_limt *
COALESCE_PENDING_LIMIT_UNIT;
- coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg *
- COALESCE_TIMER_CFG_UNIT;
- coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt *
- COALESCE_PENDING_LIMIT_UNIT;
-
- return 0;
-}
-static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal)
-{
- if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
- coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT ||
- coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
- coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT)
- return -ERANGE;
+ /* tx/rx use the same interrupt */
+ coal->tx_coalesce_usecs = coal->rx_coalesce_usecs;
+ coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames;
+ coal->use_adaptive_rx_coalesce = nic_dev->adaptive_rx_coal;
+
+ coal->pkt_rate_high = (u32)interrupt_info->pkt_rate_high;
+ coal->rx_coalesce_usecs_high = interrupt_info->rx_usecs_high *
+ COALESCE_TIMER_CFG_UNIT;
+ coal->rx_max_coalesced_frames_high =
+ interrupt_info->rx_pending_limt_high *
+ COALESCE_PENDING_LIMIT_UNIT;
+
+ coal->pkt_rate_low = (u32)interrupt_info->pkt_rate_low;
+ coal->rx_coalesce_usecs_low = interrupt_info->rx_usecs_low *
+ COALESCE_TIMER_CFG_UNIT;
+ coal->rx_max_coalesced_frames_low =
+ interrupt_info->rx_pending_limt_low *
+ COALESCE_PENDING_LIMIT_UNIT;
return 0;
}
-static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id,
- struct hinic_intr_coal_info *coal,
- bool set_rx_coal)
+static int set_queue_coalesce(struct hinic_nic_dev *nic_dev, u16 q_id,
+ struct hinic_intr_coal_info *coal)
{
- struct hinic_intr_coal_info *intr_coal = NULL;
- struct hinic_msix_config interrupt_info = {0};
+ struct hinic_intr_coal_info *intr_coal;
+ struct nic_interrupt_info interrupt_info = {0};
struct net_device *netdev = nic_dev->netdev;
- u16 msix_idx;
int err;
- intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] :
- &nic_dev->tx_intr_coalesce[q_id];
+ intr_coal = &nic_dev->intr_coalesce[q_id];
+ if (intr_coal->coalesce_timer_cfg != coal->coalesce_timer_cfg ||
+ intr_coal->pending_limt != coal->pending_limt)
+ intr_coal->user_set_intr_coal_flag = 1;
intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg;
intr_coal->pending_limt = coal->pending_limt;
+ intr_coal->pkt_rate_low = coal->pkt_rate_low;
+ intr_coal->rx_usecs_low = coal->rx_usecs_low;
+ intr_coal->rx_pending_limt_low = coal->rx_pending_limt_low;
+ intr_coal->pkt_rate_high = coal->pkt_rate_high;
+ intr_coal->rx_usecs_high = coal->rx_usecs_high;
+ intr_coal->rx_pending_limt_high = coal->rx_pending_limt_high;
/* netdev not running or qp not in using,
* don't need to set coalesce to hw
*/
- if (!(nic_dev->flags & HINIC_INTF_UP) ||
- q_id >= nic_dev->num_qps)
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) ||
+ q_id >= nic_dev->num_qps || nic_dev->adaptive_rx_coal)
return 0;
- msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry :
- nic_dev->txqs[q_id].sq->msix_entry;
- interrupt_info.msix_index = msix_idx;
- interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
- interrupt_info.pending_cnt = intr_coal->pending_limt;
- interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
-
- err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info);
+ interrupt_info.msix_index = nic_dev->irq_cfg[q_id].msix_entry_idx;
+ interrupt_info.lli_set = 0;
+ interrupt_info.interrupt_coalesc_set = 1;
+ interrupt_info.coalesc_timer_cfg = intr_coal->coalesce_timer_cfg;
+ interrupt_info.pending_limt = intr_coal->pending_limt;
+ interrupt_info.resend_timer_cfg = intr_coal->resend_timer_cfg;
+ nic_dev->rxqs[q_id].last_coalesc_timer_cfg =
+ intr_coal->coalesce_timer_cfg;
+ nic_dev->rxqs[q_id].last_pending_limt = intr_coal->pending_limt;
+ err = hinic_set_interrupt_cfg(nic_dev->hwdev, interrupt_info);
if (err)
- netif_warn(nic_dev, drv, netdev,
- "Failed to set %s queue%d coalesce",
- set_rx_coal ? "rx" : "tx", q_id);
+ nicif_warn(nic_dev, drv, netdev,
+ "Failed to set queue%d coalesce", q_id);
return err;
}
-static int __set_hw_coal_param(struct hinic_dev *nic_dev,
- struct hinic_intr_coal_info *intr_coal,
- u16 queue, bool set_rx_coal)
+static int is_coalesce_legal(struct net_device *netdev,
+ const struct ethtool_coalesce *coal)
{
- int err;
- u16 i;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct ethtool_coalesce tmp_coal = {0};
- if (queue == COALESCE_ALL_QUEUE) {
- for (i = 0; i < nic_dev->max_qps; i++) {
- err = set_queue_coalesce(nic_dev, i, intr_coal,
- set_rx_coal);
- if (err)
- return err;
- }
- } else {
- if (queue >= nic_dev->num_qps) {
- netif_err(nic_dev, drv, nic_dev->netdev,
- "Invalid queue_id: %d\n", queue);
- return -EINVAL;
- }
- err = set_queue_coalesce(nic_dev, queue, intr_coal,
- set_rx_coal);
- if (err)
- return err;
+ if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) {
+ nicif_err(nic_dev, drv, netdev,
+ "tx-usecs must be equal to rx-usecs\n");
+ return -EINVAL;
+ }
+
+ if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) {
+ nicif_err(nic_dev, drv, netdev,
+ "tx-frames must be equal to rx-frames\n");
+ return -EINVAL;
+ }
+
+ tmp_coal.cmd = coal->cmd;
+ tmp_coal.rx_coalesce_usecs = coal->rx_coalesce_usecs;
+ tmp_coal.rx_max_coalesced_frames = coal->rx_max_coalesced_frames;
+ tmp_coal.tx_coalesce_usecs = coal->tx_coalesce_usecs;
+ tmp_coal.tx_max_coalesced_frames = coal->tx_max_coalesced_frames;
+ tmp_coal.use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce;
+
+ tmp_coal.pkt_rate_low = coal->pkt_rate_low;
+ tmp_coal.rx_coalesce_usecs_low = coal->rx_coalesce_usecs_low;
+ tmp_coal.rx_max_coalesced_frames_low =
+ coal->rx_max_coalesced_frames_low;
+
+ tmp_coal.pkt_rate_high = coal->pkt_rate_high;
+ tmp_coal.rx_coalesce_usecs_high = coal->rx_coalesce_usecs_high;
+ tmp_coal.rx_max_coalesced_frames_high =
+ coal->rx_max_coalesced_frames_high;
+
+ if (memcmp(coal, &tmp_coal, sizeof(struct ethtool_coalesce))) {
+ nicif_err(nic_dev, drv, netdev,
+ "Only support to change rx/tx-usecs and rx/tx-frames\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG) {
+ nicif_err(nic_dev, drv, netdev,
+ "rx_coalesce_usecs out of range[%d-%d]\n", 0,
+ COALESCE_MAX_TIMER_CFG);
+ return -EOPNOTSUPP;
+ }
+
+ if (coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT) {
+ nicif_err(nic_dev, drv, netdev,
+ "rx_max_coalesced_frames out of range[%d-%d]\n", 0,
+ COALESCE_MAX_PENDING_LIMIT);
+ return -EOPNOTSUPP;
+ }
+
+ if (coal->rx_coalesce_usecs_low > COALESCE_MAX_TIMER_CFG) {
+ nicif_err(nic_dev, drv, netdev,
+ "rx_coalesce_usecs_low out of range[%d-%d]\n", 0,
+ COALESCE_MAX_TIMER_CFG);
+ return -EOPNOTSUPP;
+ }
+
+ if (coal->rx_max_coalesced_frames_low > COALESCE_MAX_PENDING_LIMIT) {
+ nicif_err(nic_dev, drv, netdev,
+ "rx_max_coalesced_frames_low out of range[%d-%d]\n",
+ 0, COALESCE_MAX_PENDING_LIMIT);
+ return -EOPNOTSUPP;
+ }
+
+ if (coal->rx_coalesce_usecs_high > COALESCE_MAX_TIMER_CFG) {
+ nicif_err(nic_dev, drv, netdev,
+ "rx_coalesce_usecs_high out of range[%d-%d]\n", 0,
+ COALESCE_MAX_TIMER_CFG);
+ return -EOPNOTSUPP;
+ }
+
+ if (coal->rx_max_coalesced_frames_high > COALESCE_MAX_PENDING_LIMIT) {
+ nicif_err(nic_dev, drv, netdev,
+ "rx_max_coalesced_frames_high out of range[%d-%d]\n",
+ 0, COALESCE_MAX_PENDING_LIMIT);
+ return -EOPNOTSUPP;
+ }
+
+ if (coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT >=
+ coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT) {
+ nicif_err(nic_dev, drv, netdev,
+ "coalesce_usecs_high(%u) must more than coalesce_usecs_low(%u), after dividing %d usecs unit\n",
+ coal->rx_coalesce_usecs_high,
+ coal->rx_coalesce_usecs_low,
+ COALESCE_TIMER_CFG_UNIT);
+ return -EOPNOTSUPP;
+ }
+
+ if (coal->rx_max_coalesced_frames_low / COALESCE_PENDING_LIMIT_UNIT >=
+ coal->rx_max_coalesced_frames_high / COALESCE_PENDING_LIMIT_UNIT) {
+ nicif_err(nic_dev, drv, netdev,
+ "coalesced_frames_high(%u) must more than coalesced_frames_low(%u), after dividing %d frames unit\n",
+ coal->rx_max_coalesced_frames_high,
+ coal->rx_max_coalesced_frames_low,
+ COALESCE_PENDING_LIMIT_UNIT);
+ return -EOPNOTSUPP;
+ }
+
+ if (coal->pkt_rate_low >= coal->pkt_rate_high) {
+ nicif_err(nic_dev, drv, netdev,
+ "pkt_rate_high(%u) must more than pkt_rate_low(%u)\n",
+ coal->pkt_rate_high,
+ coal->pkt_rate_low);
+ return -EOPNOTSUPP;
}
return 0;
}
+#define CHECK_COALESCE_ALIGN(coal, item, unit) \
+do { \
+ if ((coal)->item % (unit)) \
+ nicif_warn(nic_dev, drv, netdev, \
+ "%s in %d units, change to %d\n", \
+ #item, (unit), ALIGN_DOWN((coal)->item, unit));\
+} while (0)
+
+#define CHECK_COALESCE_CHANGED(coal, item, unit, ori_val, obj_str) \
+do { \
+ if (((coal)->item / (unit)) != (ori_val)) \
+ nicif_info(nic_dev, drv, netdev, \
+ "Change %s from %d to %d %s\n", \
+ #item, (ori_val) * (unit), \
+ ALIGN_DOWN((coal)->item, unit), (obj_str));\
+} while (0)
+
+#define CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \
+do { \
+ if ((coal)->item != (ori_val)) \
+ nicif_info(nic_dev, drv, netdev, \
+ "Change %s from %llu to %u %s\n", \
+ #item, (ori_val), (coal)->item, (obj_str));\
+} while (0)
+
static int __hinic_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coal, u16 queue)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_intr_coal_info rx_intr_coal = {0};
- struct hinic_intr_coal_info tx_intr_coal = {0};
- bool set_rx_coal = false;
- bool set_tx_coal = false;
- int err;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_intr_coal_info intr_coal = {0};
+ struct hinic_intr_coal_info *ori_intr_coal;
+ char obj_str[OBJ_STR_MAX_LEN] = {0};
+ u16 i;
+ int err = 0;
- err = is_coalesce_exceed_limit(coal);
+ err = is_coalesce_legal(netdev, coal);
if (err)
return err;
- if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) {
- rx_intr_coal.coalesce_timer_cfg =
- (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
- rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
- COALESCE_PENDING_LIMIT_UNIT);
- set_rx_coal = true;
- }
+ CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT);
+ CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames,
+ COALESCE_PENDING_LIMIT_UNIT);
+ CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high,
+ COALESCE_TIMER_CFG_UNIT);
+ CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high,
+ COALESCE_PENDING_LIMIT_UNIT);
+ CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low,
+ COALESCE_TIMER_CFG_UNIT);
+ CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low,
+ COALESCE_PENDING_LIMIT_UNIT);
- if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) {
- tx_intr_coal.coalesce_timer_cfg =
- (u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
- tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames /
- COALESCE_PENDING_LIMIT_UNIT);
- set_tx_coal = true;
+ if (queue == COALESCE_ALL_QUEUE) {
+ ori_intr_coal = &nic_dev->intr_coalesce[0];
+ err = snprintf(obj_str, sizeof(obj_str), "for netdev");
+ if (err <= 0 || err >= OBJ_STR_MAX_LEN) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to snprintf string, function return(%d) and dest_len(%d)\n",
+ err, OBJ_STR_MAX_LEN);
+ return -EFAULT;
+ }
+ } else {
+ ori_intr_coal = &nic_dev->intr_coalesce[queue];
+ err = snprintf(obj_str, sizeof(obj_str), "for queue %d", queue);
+ if (err <= 0 || err >= OBJ_STR_MAX_LEN) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to snprintf string, function return(%d) and dest_len(%d)\n",
+ err, OBJ_STR_MAX_LEN);
+ return -EFAULT;
+ }
}
+ CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT,
+ ori_intr_coal->coalesce_timer_cfg, obj_str);
+ CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames,
+ COALESCE_PENDING_LIMIT_UNIT,
+ ori_intr_coal->pending_limt, obj_str);
+ CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high,
+ ori_intr_coal->pkt_rate_high, obj_str);
+ CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high,
+ COALESCE_TIMER_CFG_UNIT,
+ ori_intr_coal->rx_usecs_high, obj_str);
+ CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high,
+ COALESCE_PENDING_LIMIT_UNIT,
+ ori_intr_coal->rx_pending_limt_high, obj_str);
+ CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low,
+ ori_intr_coal->pkt_rate_low, obj_str);
+ CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low,
+ COALESCE_TIMER_CFG_UNIT,
+ ori_intr_coal->rx_usecs_low, obj_str);
+ CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low,
+ COALESCE_PENDING_LIMIT_UNIT,
+ ori_intr_coal->rx_pending_limt_low, obj_str);
+
+ intr_coal.coalesce_timer_cfg =
+ (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
+ intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
+ COALESCE_PENDING_LIMIT_UNIT);
+
+ nic_dev->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+
+ intr_coal.pkt_rate_high = coal->pkt_rate_high;
+ intr_coal.rx_usecs_high =
+ (u8)(coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT);
+ intr_coal.rx_pending_limt_high =
+ (u8)(coal->rx_max_coalesced_frames_high /
+ COALESCE_PENDING_LIMIT_UNIT);
+
+ intr_coal.pkt_rate_low = coal->pkt_rate_low;
+ intr_coal.rx_usecs_low =
+ (u8)(coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT);
+ intr_coal.rx_pending_limt_low =
+ (u8)(coal->rx_max_coalesced_frames_low /
+ COALESCE_PENDING_LIMIT_UNIT);
+
+ /* coalesce timer or pending set to zero will disable coalesce */
+ if (!nic_dev->adaptive_rx_coal &&
+ (!intr_coal.coalesce_timer_cfg || !intr_coal.pending_limt))
+ nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n");
- /* setting coalesce timer or pending limit to zero will disable
- * coalesce
- */
- if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg ||
- !rx_intr_coal.pending_limt))
- netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n");
- if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg ||
- !tx_intr_coal.pending_limt))
- netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n");
-
- if (set_rx_coal) {
- err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true);
- if (err)
- return err;
- }
- if (set_tx_coal) {
- err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false);
- if (err)
- return err;
+ if (queue == COALESCE_ALL_QUEUE) {
+ for (i = 0; i < nic_dev->max_qps; i++)
+ set_queue_coalesce(nic_dev, i, &intr_coal);
+ } else {
+ if (queue >= nic_dev->num_qps) {
+ nicif_err(nic_dev, drv, netdev,
+ "Invalid queue_id: %d\n", queue);
+ return -EINVAL;
+ }
+ set_queue_coalesce(nic_dev, queue, &intr_coal);
}
+
return 0;
}
@@ -823,702 +1501,290 @@ static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
return __hinic_set_coalesce(netdev, coal, queue);
}
-static void hinic_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+static void get_drv_queue_stats(struct hinic_nic_dev *nic_dev, u64 *data)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_pause_config pause_info = {0};
- struct hinic_nic_cfg *nic_cfg;
- int err;
+ struct hinic_txq_stats txq_stats;
+ struct hinic_rxq_stats rxq_stats;
+ u16 i = 0, j = 0, qid = 0;
+ char *p;
- nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ if (!nic_dev->txqs)
+ break;
- err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
- if (!err) {
- pause->autoneg = pause_info.auto_neg;
- if (nic_cfg->pause_set || !pause_info.auto_neg) {
- pause->rx_pause = nic_cfg->rx_pause;
- pause->tx_pause = nic_cfg->tx_pause;
- } else {
- pause->rx_pause = pause_info.rx_pause;
- pause->tx_pause = pause_info.tx_pause;
+ hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
+ for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
+ p = (char *)(&txq_stats) +
+ hinic_tx_queue_stats[j].offset;
+ data[i] = (hinic_tx_queue_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
-}
-
-static int hinic_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_pause_config pause_info = {0};
- struct hinic_port_cap port_cap = {0};
- int err;
-
- err = hinic_port_get_cap(nic_dev, &port_cap);
- if (err)
- return -EIO;
-
- if (pause->autoneg != port_cap.autoneg_state)
- return -EOPNOTSUPP;
- pause_info.auto_neg = pause->autoneg;
- pause_info.rx_pause = pause->rx_pause;
- pause_info.tx_pause = pause->tx_pause;
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ if (!nic_dev->rxqs)
+ break;
- mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
- err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
- if (err) {
- mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
- return err;
+ hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
+ for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
+ p = (char *)(&rxq_stats) +
+ hinic_rx_queue_stats[j].offset;
+ data[i] = (hinic_rx_queue_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
}
- nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true;
- nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg;
- nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause;
- nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause;
- mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
-
- return 0;
}
-static void hinic_get_channels(struct net_device *netdev,
- struct ethtool_channels *channels)
+static void hinic_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *net_stats;
+ struct hinic_phy_port_stats *port_stats;
+ struct hinic_nic_stats *nic_stats;
+ struct hinic_vport_stats vport_stats = {0};
+ u16 i = 0, j = 0;
+ char *p;
+ int err;
- channels->max_combined = nic_dev->max_qps;
- channels->combined_count = hinic_hwdev_num_qps(hwdev);
-}
+ net_stats = dev_get_stats(netdev, &temp);
+ for (j = 0; j < ARRAY_LEN(hinic_netdev_stats); j++, i++) {
+ p = (char *)(net_stats) + hinic_netdev_stats[j].offset;
+ data[i] = (hinic_netdev_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
-static int hinic_set_channels(struct net_device *netdev,
- struct ethtool_channels *channels)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- unsigned int count = channels->combined_count;
- int err;
+ nic_stats = &nic_dev->stats;
+ for (j = 0; j < ARRAY_LEN(hinic_nic_dev_stats); j++, i++) {
+ p = (char *)(nic_stats) + hinic_nic_dev_stats[j].offset;
+ data[i] = (hinic_nic_dev_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
- netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
- hinic_hwdev_num_qps(nic_dev->hwdev), count);
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+ err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
+ if (err)
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to get function stats from fw\n");
- if (netif_running(netdev)) {
- netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
- hinic_close(netdev);
+ for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
+ p = (char *)(&vport_stats) +
+ hinic_function_stats[j].offset;
+ data[i] = (hinic_function_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ }
+
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) &&
+ FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+ port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
+ if (!port_stats) {
+ memset(&data[i], 0,
+ ARRAY_LEN(hinic_port_stats) * sizeof(*data));
+ i += ARRAY_LEN(hinic_port_stats);
+ goto get_drv_stats;
+ }
- nic_dev->hwdev->nic_cap.num_qps = count;
+ err = hinic_get_phy_port_stats(nic_dev->hwdev, port_stats);
+ if (err)
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to get port stats from fw\n");
- err = hinic_open(netdev);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to open netdev\n");
- return -EFAULT;
+ for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
+ p = (char *)(port_stats) + hinic_port_stats[j].offset;
+ data[i] = (hinic_port_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- } else {
- nic_dev->hwdev->nic_cap.num_qps = count;
+
+ kfree(port_stats);
}
- return 0;
+get_drv_stats:
+ get_drv_queue_stats(nic_dev, data + i);
}
-static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static void hinic_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
{
- struct hinic_rss_type rss_type = { 0 };
- int err;
+ u16 i = 0, j = 0;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ char *p = (char *)data;
- cmd->data = 0;
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
+ return;
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_LEN(hinic_netdev_stats); i++) {
+ memcpy(p, hinic_netdev_stats[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
- if (!(nic_dev->flags & HINIC_RSS_ENABLE))
- return 0;
+ for (i = 0; i < ARRAY_LEN(hinic_nic_dev_stats); i++) {
+ memcpy(p, hinic_nic_dev_stats[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
- err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
- &rss_type);
- if (err)
- return err;
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+ for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
+ memcpy(p, hinic_function_stats[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ }
- cmd->data = RXH_IP_SRC | RXH_IP_DST;
- switch (cmd->flow_type) {
- case TCP_V4_FLOW:
- if (rss_type.tcp_ipv4)
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case TCP_V6_FLOW:
- if (rss_type.tcp_ipv6)
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case UDP_V4_FLOW:
- if (rss_type.udp_ipv4)
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case UDP_V6_FLOW:
- if (rss_type.udp_ipv6)
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case IPV4_FLOW:
- case IPV6_FLOW:
- break;
- default:
- cmd->data = 0;
- return -EINVAL;
- }
-
- return 0;
-}
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) &&
+ FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+ for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
+ memcpy(p, hinic_port_stats[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ }
-static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
- struct hinic_rss_type *rss_type)
-{
- u8 rss_l4_en = 0;
+ for (i = 0; i < nic_dev->num_qps; i++) {
+ for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
+ sprintf(p, hinic_tx_queue_stats[j].name, i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
- switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- rss_l4_en = 0;
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- rss_l4_en = 1;
- break;
- default:
- return -EINVAL;
- }
+ for (i = 0; i < nic_dev->num_qps; i++) {
+ for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
+ sprintf(p, hinic_rx_queue_stats[j].name, i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
- switch (cmd->flow_type) {
- case TCP_V4_FLOW:
- rss_type->tcp_ipv4 = rss_l4_en;
- break;
- case TCP_V6_FLOW:
- rss_type->tcp_ipv6 = rss_l4_en;
- break;
- case UDP_V4_FLOW:
- rss_type->udp_ipv4 = rss_l4_en;
- break;
- case UDP_V6_FLOW:
- rss_type->udp_ipv6 = rss_l4_en;
- break;
+ return;
default:
- return -EINVAL;
+ nicif_err(nic_dev, drv, netdev,
+ "Invalid string set %d", stringset);
+ return;
}
-
- return 0;
}
-static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
- struct hinic_rss_type *rss_type = &nic_dev->rss_type;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 port;
int err;
- if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
- cmd->data = 0;
+ if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+ nicif_err(nic_dev, drv, netdev, "Current function doesn't support setting LED status\n");
return -EOPNOTSUPP;
}
- /* RSS does not support anything other than hashing
- * to queues on src and dst IPs and ports
- */
- if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
- RXH_L4_B_2_3))
- return -EINVAL;
+ port = hinic_physical_port_id(nic_dev->hwdev);
- /* We need at least the IP SRC and DEST fields for hashing */
- if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
- return -EINVAL;
-
- err = hinic_get_rss_type(nic_dev,
- nic_dev->rss_tmpl_idx, rss_type);
- if (err)
- return -EFAULT;
-
- switch (cmd->flow_type) {
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- case UDP_V4_FLOW:
- case UDP_V6_FLOW:
- err = set_l4_rss_hash_ops(cmd, rss_type);
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ err = hinic_set_led_status(nic_dev->hwdev, port,
+ HINIC_LED_TYPE_LINK,
+ HINIC_LED_MODE_FORCE_2HZ);
if (err)
- return err;
- break;
- case IPV4_FLOW:
- rss_type->ipv4 = 1;
- break;
- case IPV6_FLOW:
- rss_type->ipv6 = 1;
+ nicif_err(nic_dev, drv, netdev,
+ "Set LED blinking in 2HZ failed\n");
+ else
+ nicif_info(nic_dev, drv, netdev,
+ "Set LED blinking in 2HZ success\n");
break;
- default:
- return -EINVAL;
- }
-
- err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
- *rss_type);
- if (err)
- return -EFAULT;
-
- return 0;
-}
-
-static int __set_rss_rxfh(struct net_device *netdev,
- const u32 *indir, const u8 *key)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err;
-
- if (indir) {
- if (!nic_dev->rss_indir_user) {
- nic_dev->rss_indir_user =
- kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
- GFP_KERNEL);
- if (!nic_dev->rss_indir_user)
- return -ENOMEM;
- }
-
- memcpy(nic_dev->rss_indir_user, indir,
- sizeof(u32) * HINIC_RSS_INDIR_SIZE);
-
- err = hinic_rss_set_indir_tbl(nic_dev,
- nic_dev->rss_tmpl_idx, indir);
- if (err)
- return -EFAULT;
- }
- if (key) {
- if (!nic_dev->rss_hkey_user) {
- nic_dev->rss_hkey_user =
- kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);
-
- if (!nic_dev->rss_hkey_user)
- return -ENOMEM;
- }
-
- memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);
-
- err = hinic_rss_set_template_tbl(nic_dev,
- nic_dev->rss_tmpl_idx, key);
+ case ETHTOOL_ID_INACTIVE:
+ err = hinic_reset_led_status(nic_dev->hwdev, port);
if (err)
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int hinic_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *cmd, u32 *rule_locs)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
+ nicif_err(nic_dev, drv, netdev,
+ "Reset LED to original status failed\n");
+ else
+ nicif_info(nic_dev, drv, netdev,
+ "Reset LED to original status success\n");
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = nic_dev->num_qps;
- break;
- case ETHTOOL_GRXFH:
- err = hinic_get_rss_hash_opts(nic_dev, cmd);
break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
- return err;
-}
-
-static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- err = hinic_set_rss_hash_opts(nic_dev, cmd);
- break;
default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int hinic_get_rxfh(struct net_device *netdev,
- u32 *indir, u8 *key, u8 *hfunc)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u8 hash_engine_type = 0;
- int err = 0;
-
- if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
-
- if (hfunc) {
- err = hinic_rss_get_hash_engine(nic_dev,
- nic_dev->rss_tmpl_idx,
- &hash_engine_type);
- if (err)
- return -EFAULT;
-
- *hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
}
- if (indir) {
- err = hinic_rss_get_indir_tbl(nic_dev,
- nic_dev->rss_tmpl_idx, indir);
- if (err)
- return -EFAULT;
- }
-
- if (key)
- err = hinic_rss_get_template_tbl(nic_dev,
- nic_dev->rss_tmpl_idx, key);
-
- return err;
-}
-
-static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
-
- if (!(nic_dev->flags & HINIC_RSS_ENABLE))
- return -EOPNOTSUPP;
-
- if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
- if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
- return -EOPNOTSUPP;
-
- nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
- HINIC_RSS_HASH_ENGINE_TYPE_XOR :
- HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
- err = hinic_rss_set_hash_engine
- (nic_dev, nic_dev->rss_tmpl_idx,
- nic_dev->rss_hash_engine);
- if (err)
- return -EFAULT;
- }
-
- err = __set_rss_rxfh(netdev, indir, key);
-
- return err;
-}
-
-static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
-{
- return HINIC_RSS_KEY_SIZE;
-}
-
-static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
-{
- return HINIC_RSS_INDIR_SIZE;
-}
-
-#define HINIC_FUNC_STAT(_stat_item) { \
- .name = #_stat_item, \
- .size = sizeof_field(struct hinic_vport_stats, _stat_item), \
- .offset = offsetof(struct hinic_vport_stats, _stat_item) \
-}
-
-static struct hinic_stats hinic_function_stats[] = {
- HINIC_FUNC_STAT(tx_unicast_pkts_vport),
- HINIC_FUNC_STAT(tx_unicast_bytes_vport),
- HINIC_FUNC_STAT(tx_multicast_pkts_vport),
- HINIC_FUNC_STAT(tx_multicast_bytes_vport),
- HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
- HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
-
- HINIC_FUNC_STAT(rx_unicast_pkts_vport),
- HINIC_FUNC_STAT(rx_unicast_bytes_vport),
- HINIC_FUNC_STAT(rx_multicast_pkts_vport),
- HINIC_FUNC_STAT(rx_multicast_bytes_vport),
- HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
- HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
-
- HINIC_FUNC_STAT(tx_discard_vport),
- HINIC_FUNC_STAT(rx_discard_vport),
- HINIC_FUNC_STAT(tx_err_vport),
- HINIC_FUNC_STAT(rx_err_vport),
-};
-
-static char hinic_test_strings[][ETH_GSTRING_LEN] = {
- "Internal lb test (on/offline)",
- "External lb test (external_lb)",
-};
-
-#define HINIC_PORT_STAT(_stat_item) { \
- .name = #_stat_item, \
- .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
- .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
-}
-
-static struct hinic_stats hinic_port_stats[] = {
- HINIC_PORT_STAT(mac_rx_total_pkt_num),
- HINIC_PORT_STAT(mac_rx_total_oct_num),
- HINIC_PORT_STAT(mac_rx_bad_pkt_num),
- HINIC_PORT_STAT(mac_rx_bad_oct_num),
- HINIC_PORT_STAT(mac_rx_good_pkt_num),
- HINIC_PORT_STAT(mac_rx_good_oct_num),
- HINIC_PORT_STAT(mac_rx_uni_pkt_num),
- HINIC_PORT_STAT(mac_rx_multi_pkt_num),
- HINIC_PORT_STAT(mac_rx_broad_pkt_num),
- HINIC_PORT_STAT(mac_tx_total_pkt_num),
- HINIC_PORT_STAT(mac_tx_total_oct_num),
- HINIC_PORT_STAT(mac_tx_bad_pkt_num),
- HINIC_PORT_STAT(mac_tx_bad_oct_num),
- HINIC_PORT_STAT(mac_tx_good_pkt_num),
- HINIC_PORT_STAT(mac_tx_good_oct_num),
- HINIC_PORT_STAT(mac_tx_uni_pkt_num),
- HINIC_PORT_STAT(mac_tx_multi_pkt_num),
- HINIC_PORT_STAT(mac_tx_broad_pkt_num),
- HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
- HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
- HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
- HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
- HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
- HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
- HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
- HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
- HINIC_PORT_STAT(mac_rx_pause_num),
- HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
- HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
- HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
- HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
- HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
- HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
- HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
- HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
- HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
- HINIC_PORT_STAT(mac_rx_control_pkt_num),
- HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
- HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
- HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
- HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
- HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
- HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
- HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
- HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
- HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
- HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
- HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
- HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
- HINIC_PORT_STAT(mac_tx_pause_num),
- HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
- HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
- HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
- HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
- HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
- HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
- HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
- HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
- HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
- HINIC_PORT_STAT(mac_tx_control_pkt_num),
- HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
- HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
- HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
-};
-
-#define HINIC_TXQ_STAT(_stat_item) { \
- .name = "txq%d_"#_stat_item, \
- .size = sizeof_field(struct hinic_txq_stats, _stat_item), \
- .offset = offsetof(struct hinic_txq_stats, _stat_item) \
-}
-
-static struct hinic_stats hinic_tx_queue_stats[] = {
- HINIC_TXQ_STAT(pkts),
- HINIC_TXQ_STAT(bytes),
- HINIC_TXQ_STAT(tx_busy),
- HINIC_TXQ_STAT(tx_wake),
- HINIC_TXQ_STAT(tx_dropped),
- HINIC_TXQ_STAT(big_frags_pkts),
-};
-
-#define HINIC_RXQ_STAT(_stat_item) { \
- .name = "rxq%d_"#_stat_item, \
- .size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
- .offset = offsetof(struct hinic_rxq_stats, _stat_item) \
-}
-
-static struct hinic_stats hinic_rx_queue_stats[] = {
- HINIC_RXQ_STAT(pkts),
- HINIC_RXQ_STAT(bytes),
- HINIC_RXQ_STAT(errors),
- HINIC_RXQ_STAT(csum_errors),
- HINIC_RXQ_STAT(other_errors),
-};
-
-static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
-{
- struct hinic_txq_stats txq_stats;
- struct hinic_rxq_stats rxq_stats;
- u16 i = 0, j = 0, qid = 0;
- char *p;
-
- for (qid = 0; qid < nic_dev->num_qps; qid++) {
- if (!nic_dev->txqs)
- break;
-
- hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
- for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) {
- p = (char *)&txq_stats +
- hinic_tx_queue_stats[j].offset;
- data[i] = (hinic_tx_queue_stats[j].size ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- }
-
- for (qid = 0; qid < nic_dev->num_qps; qid++) {
- if (!nic_dev->rxqs)
- break;
-
- hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
- for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) {
- p = (char *)&rxq_stats +
- hinic_rx_queue_stats[j].offset;
- data[i] = (hinic_rx_queue_stats[j].size ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- }
+ if (err)
+ return -EFAULT;
+
+ return 0;
}
-static void hinic_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+static void hinic_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_vport_stats vport_stats = {0};
- struct hinic_phy_port_stats *port_stats;
- u16 i = 0, j = 0;
- char *p;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct nic_pause_config nic_pause = {0};
int err;
- err = hinic_get_vport_stats(nic_dev, &vport_stats);
- if (err)
- netif_err(nic_dev, drv, netdev,
- "Failed to get vport stats from firmware\n");
-
- for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) {
- p = (char *)&vport_stats + hinic_function_stats[j].offset;
- data[i] = (hinic_function_stats[j].size ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
-
- port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
- if (!port_stats) {
- memset(&data[i], 0,
- ARRAY_SIZE(hinic_port_stats) * sizeof(*data));
- i += ARRAY_SIZE(hinic_port_stats);
- goto get_drv_stats;
- }
-
- err = hinic_get_phy_port_stats(nic_dev, port_stats);
- if (err)
- netif_err(nic_dev, drv, netdev,
- "Failed to get port stats from firmware\n");
-
- for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) {
- p = (char *)port_stats + hinic_port_stats[j].offset;
- data[i] = (hinic_port_stats[j].size ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to get pauseparam from hw\n");
+ } else {
+ pause->autoneg = nic_pause.auto_neg;
+ pause->rx_pause = nic_pause.rx_pause;
+ pause->tx_pause = nic_pause.tx_pause;
}
-
- kfree(port_stats);
-
-get_drv_stats:
- get_drv_queue_stats(nic_dev, data + i);
}
-static int hinic_get_sset_count(struct net_device *netdev, int sset)
+static int hinic_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int count, q_num;
-
- switch (sset) {
- case ETH_SS_TEST:
- return ARRAY_SIZE(hinic_test_strings);
- case ETH_SS_STATS:
- q_num = nic_dev->num_qps;
- count = ARRAY_SIZE(hinic_function_stats) +
- (ARRAY_SIZE(hinic_tx_queue_stats) +
- ARRAY_SIZE(hinic_rx_queue_stats)) * q_num;
-
- count += ARRAY_SIZE(hinic_port_stats);
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct nic_pause_config nic_pause = {0};
+ struct nic_port_info port_info = {0};
+ int err;
- return count;
- default:
+ if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+ nicif_err(nic_dev, drv, netdev, "Setting pause parameters is not supported\n");
return -EOPNOTSUPP;
}
-}
-
-static void hinic_get_strings(struct net_device *netdev,
- u32 stringset, u8 *data)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- char *p = (char *)data;
- u16 i, j;
-
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
- return;
- case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
- memcpy(p, hinic_function_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
-
- for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
- memcpy(p, hinic_port_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
- sprintf(p, hinic_tx_queue_stats[j].name, i);
- p += ETH_GSTRING_LEN;
- }
- }
+ err = hinic_get_port_info(nic_dev->hwdev, &port_info);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to get auto-negotiation state\n");
+ return -EFAULT;
+ }
- for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
- sprintf(p, hinic_rx_queue_stats[j].name, i);
- p += ETH_GSTRING_LEN;
- }
- }
+ if (pause->autoneg != port_info.autoneg_state) {
+ nicif_err(nic_dev, drv, netdev,
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
- return;
- default:
- return;
+ nic_pause.auto_neg = pause->autoneg;
+ nic_pause.rx_pause = pause->rx_pause;
+ nic_pause.tx_pause = pause->tx_pause;
+
+ err = hinic_set_pause_info(nic_dev->hwdev, nic_pause);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to set pauseparam\n");
+ return err;
}
+
+ nicif_info(nic_dev, drv, netdev, "Set pause options, tx: %s, rx: %s\n",
+ pause->tx_pause ? "on" : "off",
+ pause->rx_pause ? "on" : "off");
+
+ return 0;
}
-static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
+static int hinic_run_lp_test(struct hinic_nic_dev *nic_dev, u32 test_time)
{
- u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
- struct net_device *netdev = nic_dev->netdev;
- struct sk_buff *skb_tmp = NULL;
- struct sk_buff *skb = NULL;
- u32 cnt = test_time * 5;
- u8 *test_data = NULL;
u32 i;
u8 j;
+ u32 cnt = test_time * 5;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *skb_tmp = NULL;
+ u8 *test_data = NULL;
+ u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
+ struct net_device *netdev = nic_dev->netdev;
skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
if (!skb_tmp)
@@ -1526,7 +1792,7 @@ static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
test_data = __skb_put(skb_tmp, LP_PKT_LEN);
- memset(test_data, 0xFF, 2 * ETH_ALEN);
+ memset(test_data, 0xFF, (2 * ETH_ALEN));
test_data[ETH_ALEN] = 0xFE;
test_data[2 * ETH_ALEN] = 0x08;
test_data[2 * ETH_ALEN + 1] = 0x0;
@@ -1540,13 +1806,13 @@ static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
for (i = 0; i < cnt; i++) {
nic_dev->lb_test_rx_idx = 0;
- memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);
+ memset(lb_test_rx_buf, 0, (LP_PKT_CNT * LP_PKT_LEN));
for (j = 0; j < LP_PKT_CNT; j++) {
skb = pskb_copy(skb_tmp, GFP_ATOMIC);
if (!skb) {
dev_kfree_skb_any(skb_tmp);
- netif_err(nic_dev, drv, netdev,
+ nicif_err(nic_dev, drv, netdev,
"Copy skb failed for loopback test\n");
return -ENOMEM;
}
@@ -1557,305 +1823,623 @@ static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
if (hinic_lb_xmit_frame(skb, netdev)) {
dev_kfree_skb_any(skb);
dev_kfree_skb_any(skb_tmp);
- netif_err(nic_dev, drv, netdev,
+ nicif_err(nic_dev, drv, netdev,
"Xmit pkt failed for loopback test\n");
return -EBUSY;
}
}
- /* wait till all pkts received to RX buffer */
+ /* wait till all pkt received to rx buffer */
msleep(200);
for (j = 0; j < LP_PKT_CNT; j++) {
- if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN,
- skb_tmp->data, LP_PKT_LEN - 1) ||
- (*(lb_test_rx_buf + j * LP_PKT_LEN +
- LP_PKT_LEN - 1) != j)) {
+ if (memcmp((lb_test_rx_buf + (j * LP_PKT_LEN)),
+ skb_tmp->data, (LP_PKT_LEN - 1)) ||
+ (*(lb_test_rx_buf + ((j * LP_PKT_LEN) +
+ (LP_PKT_LEN - 1))) != j)) {
dev_kfree_skb_any(skb_tmp);
- netif_err(nic_dev, drv, netdev,
+ nicif_err(nic_dev, drv, netdev,
"Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
- j + i * LP_PKT_CNT,
- LP_PKT_LEN - 1,
- *(lb_test_rx_buf + j * LP_PKT_LEN +
- LP_PKT_LEN - 1));
+ (j + (i * LP_PKT_CNT)),
+ (LP_PKT_LEN - 1),
+ *(lb_test_rx_buf +
+ (((j * LP_PKT_LEN) +
+ (LP_PKT_LEN - 1)))));
return -EIO;
}
}
}
- dev_kfree_skb_any(skb_tmp);
+ dev_kfree_skb_any(skb_tmp);
+ nicif_info(nic_dev, drv, netdev, "Loopback test succeed\n");
+ return 0;
+}
+
+void hinic_lp_test(struct net_device *netdev, struct ethtool_test *eth_test,
+ u64 *data, u32 test_time)
+{
+ int err = 0;
+ u8 link_status = 0;
+ u8 *lb_test_rx_buf = NULL;
+ struct ethtool_test test = {0};
+ enum diag_test_index test_index = 0;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ memset(data, 0, (DIAG_TEST_MAX * sizeof(u64)));
+
+ /* Loopback test is not supported while the netdev is closed. */
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, netdev,
+ "Do not support loopback test when netdev is closed.\n");
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[PORT_DOWN_ERR_IDX] = 1;
+ return;
+ }
+
+ test.flags = eth_test->flags;
+
+ if (test_time == 0)
+ test_time = LP_DEFAULT_TIME;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ if (!(test.flags & ETH_TEST_FL_EXTERNAL_LB)) {
+ test_index = INTERNAL_LP_TEST;
+
+ if (hinic_set_loopback_mode(nic_dev->hwdev, true)) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to set port loopback mode before loopback test\n");
+ err = 1;
+ goto resume_link;
+ }
+ } else {
+ test_index = EXTERNAL_LP_TEST;
+ }
+
+ lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
+ if (!lb_test_rx_buf) {
+ err = 1;
+ } else {
+ nic_dev->lb_test_rx_buf = lb_test_rx_buf;
+ nic_dev->lb_pkt_len = LP_PKT_LEN;
+ set_bit(HINIC_LP_TEST, &nic_dev->flags);
+
+ if (hinic_run_lp_test(nic_dev, test_time))
+ err = 1;
+
+ clear_bit(HINIC_LP_TEST, &nic_dev->flags);
+ msleep(100);
+ vfree(lb_test_rx_buf);
+ nic_dev->lb_test_rx_buf = NULL;
+ }
+
+ if (!(test.flags & ETH_TEST_FL_EXTERNAL_LB)) {
+ if (hinic_set_loopback_mode(nic_dev->hwdev, false)) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to cancel port loopback mode after loopback test\n");
+ err = 1;
+
+ goto resume_link;
+ }
+ }
+
+resume_link:
+ if (err) {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[test_index] = 1;
+ }
+
+ netif_tx_wake_all_queues(netdev);
+ err = hinic_get_link_state(nic_dev->hwdev, &link_status);
+ if (!err && link_status)
+ netif_carrier_on(netdev);
+}
+
+static void hinic_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+ nicif_err(nic_dev, drv, netdev, "Current function doesn't support self test\n");
+ return;
+ }
+
+ hinic_lp_test(netdev, eth_test, data, 0);
+}
+
+static int hinic_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 sfp_type;
+ u8 sfp_type_ext;
+ int err;
+
+ err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
+ if (err)
+ return err;
+
+ switch (sfp_type) {
+ case MODULE_TYPE_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case MODULE_TYPE_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE;
+ break;
+ case MODULE_TYPE_QSFP_PLUS:
+ if (sfp_type_ext >= 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE;
+
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE;
+ }
+ break;
+ case MODULE_TYPE_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE;
+ break;
+ default:
+ nicif_warn(nic_dev, drv, netdev,
+ "Optical module unknown: 0x%x\n", sfp_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
+ u16 len;
+ int err;
+
+ if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
+ if (err)
+ return err;
+
+ memcpy(data, sfp_data + ee->offset, ee->len);
+
+ return 0;
+}
+
+static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
+ struct nic_rss_type *rss_type)
+{
+ u8 rss_l4_en = 0;
+
+ switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_l4_en = 0;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_l4_en = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ rss_type->tcp_ipv4 = rss_l4_en;
+ break;
+ case TCP_V6_FLOW:
+ rss_type->tcp_ipv6 = rss_l4_en;
+ break;
+ case UDP_V4_FLOW:
+ rss_type->udp_ipv4 = rss_l4_en;
+ break;
+ case UDP_V6_FLOW:
+ rss_type->udp_ipv6 = rss_l4_en;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_set_rss_hash_opts(struct hinic_nic_dev *nic_dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct nic_rss_type *rss_type = &nic_dev->rss_type;
+ int err;
+
+ if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ cmd->data = 0;
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "RSS is disabled, setting flow-hash is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
+ RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
+ return -EINVAL;
+
+ err = hinic_get_rss_type(nic_dev->hwdev,
+ nic_dev->rss_tmpl_idx, rss_type);
+ if (err) {
+ nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n");
+ return -EFAULT;
+ }
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ err = set_l4_rss_hash_ops(cmd, rss_type);
+ if (err)
+ return err;
+
+ break;
+ case IPV4_FLOW:
+ rss_type->ipv4 = 1;
+ break;
+ case IPV6_FLOW:
+ rss_type->ipv6 = 1;
+ break;
+ default:
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Unsupported flow type\n");
+ return -EINVAL;
+ }
+
+ err = hinic_set_rss_type(nic_dev->hwdev, nic_dev->rss_tmpl_idx,
+ *rss_type);
+ if (err) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to set rss type\n");
+ return -EFAULT;
+ }
+
+ nicif_info(nic_dev, drv, nic_dev->netdev, "Set rss hash options success\n");
+
+ return 0;
+}
+
+static int hinic_get_rss_hash_opts(struct hinic_nic_dev *nic_dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct nic_rss_type rss_type = {0};
+ int err;
+
+ cmd->data = 0;
+
+ if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
+ return 0;
+
+ err = hinic_get_rss_type(nic_dev->hwdev, nic_dev->rss_tmpl_idx,
+ &rss_type);
+ if (err) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to get rss type\n");
+ return err;
+ }
+
+ cmd->data = RXH_IP_SRC | RXH_IP_DST;
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (rss_type.tcp_ipv4)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case TCP_V6_FLOW:
+ if (rss_type.tcp_ipv6)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (rss_type.udp_ipv4)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (rss_type.udp_ipv6)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Unsupported flow type\n");
+ cmd->data = 0;
+ return -EINVAL;
+ }
+
return 0;
}
-static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time,
- enum diag_test_index *test_index)
+static int hinic_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
- struct net_device *netdev = nic_dev->netdev;
- u8 *lb_test_rx_buf = NULL;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err = 0;
- if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
- *test_index = INTERNAL_LP_TEST;
- if (hinic_set_loopback_mode(nic_dev->hwdev,
- HINIC_INTERNAL_LP_MODE, true)) {
- netif_err(nic_dev, drv, netdev,
- "Failed to set port loopback mode before loopback test\n");
- return -EIO;
- }
- } else {
- *test_index = EXTERNAL_LP_TEST;
- }
-
- lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
- if (!lb_test_rx_buf) {
- err = -ENOMEM;
- } else {
- nic_dev->lb_test_rx_buf = lb_test_rx_buf;
- nic_dev->lb_pkt_len = LP_PKT_LEN;
- nic_dev->flags |= HINIC_LP_TEST;
- err = hinic_run_lp_test(nic_dev, test_time);
- nic_dev->flags &= ~HINIC_LP_TEST;
- msleep(100);
- vfree(lb_test_rx_buf);
- nic_dev->lb_test_rx_buf = NULL;
- }
-
- if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
- if (hinic_set_loopback_mode(nic_dev->hwdev,
- HINIC_INTERNAL_LP_MODE, false)) {
- netif_err(nic_dev, drv, netdev,
- "Failed to cancel port loopback mode after loopback test\n");
- err = -EIO;
- }
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = nic_dev->num_qps;
+ break;
+ case ETHTOOL_GRXFH:
+ err = hinic_get_rss_hash_opts(nic_dev, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
}
return err;
}
-static void hinic_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
+static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- enum hinic_port_link_state link_state;
- enum diag_test_index test_index = 0;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
int err = 0;
- memset(data, 0, DIAG_TEST_MAX * sizeof(u64));
-
- /* don't support loopback test when netdev is closed. */
- if (!(nic_dev->flags & HINIC_INTF_UP)) {
- netif_err(nic_dev, drv, netdev,
- "Do not support loopback test when netdev is closed\n");
- eth_test->flags |= ETH_TEST_FL_FAILED;
- data[PORT_DOWN_ERR_IDX] = 1;
- return;
- }
-
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
-
- err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
- &test_index);
- if (err) {
- eth_test->flags |= ETH_TEST_FL_FAILED;
- data[test_index] = 1;
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ err = hinic_set_rss_hash_opts(nic_dev, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
}
- netif_tx_wake_all_queues(netdev);
+ return err;
+}
- err = hinic_port_link_state(nic_dev, &link_state);
- if (!err && link_state == HINIC_LINK_STATE_UP)
- netif_carrier_on(netdev);
+static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return HINIC_RSS_INDIR_SIZE;
}
-static int hinic_set_phys_id(struct net_device *netdev,
- enum ethtool_phys_id_state state)
+static int __set_rss_rxfh(struct net_device *netdev,
+ const u32 *indir, const u8 *key)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
- u8 port;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int err, i;
- port = nic_dev->hwdev->port_id;
+ if (indir) {
+ if (!nic_dev->rss_indir_user) {
+ nic_dev->rss_indir_user =
+ kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
+ GFP_KERNEL);
+ if (!nic_dev->rss_indir_user) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to alloc memory for rss_indir_usr\n");
+ return -ENOMEM;
+ }
+ }
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- err = hinic_set_led_status(nic_dev->hwdev, port,
- HINIC_LED_TYPE_LINK,
- HINIC_LED_MODE_FORCE_2HZ);
- if (err)
- netif_err(nic_dev, drv, netdev,
- "Set LED blinking in 2HZ failed\n");
- break;
+ memcpy(nic_dev->rss_indir_user, indir,
+ sizeof(u32) * HINIC_RSS_INDIR_SIZE);
- case ETHTOOL_ID_INACTIVE:
- err = hinic_reset_led_status(nic_dev->hwdev, port);
- if (err)
- netif_err(nic_dev, drv, netdev,
- "Reset LED to original status failed\n");
- break;
+ err = hinic_rss_set_indir_tbl(nic_dev->hwdev,
+ nic_dev->rss_tmpl_idx, indir);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to set rss indir table\n");
+ return -EFAULT;
+ }
- default:
- return -EOPNOTSUPP;
+ nicif_info(nic_dev, drv, netdev, "Change rss indir success\n");
}
- return err;
-}
+ if (key) {
+ if (!nic_dev->rss_hkey_user) {
+ /* We request double the space for the hash key,
+ * the second one holds the key of Big Endian
+ * format.
+ */
+ nic_dev->rss_hkey_user =
+ kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);
-static int hinic_get_module_info(struct net_device *netdev,
- struct ethtool_modinfo *modinfo)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u8 sfp_type_ext;
- u8 sfp_type;
- int err;
+ if (!nic_dev->rss_hkey_user) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to alloc memory for rss_hkey_user\n");
+ return -ENOMEM;
+ }
- err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
- if (err)
- return err;
+ /* The second space is for big endian hash key */
+ nic_dev->rss_hkey_user_be =
+ (u32 *)(nic_dev->rss_hkey_user +
+ HINIC_RSS_KEY_SIZE);
+ }
- switch (sfp_type) {
- case SFF8024_ID_SFP:
- modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
- break;
- case SFF8024_ID_QSFP_8438:
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
- break;
- case SFF8024_ID_QSFP_8436_8636:
- if (sfp_type_ext >= 0x3) {
- modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);
- } else {
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ /* make a copy of the key, and convert it to Big Endian */
+ memcpy(nic_dev->rss_hkey_user_be, key, HINIC_RSS_KEY_SIZE);
+ for (i = 0; i < HINIC_RSS_KEY_SIZE / 4; i++)
+ nic_dev->rss_hkey_user_be[i] =
+ cpu_to_be32(nic_dev->rss_hkey_user_be[i]);
+
+ err = hinic_rss_set_template_tbl(nic_dev->hwdev,
+ nic_dev->rss_tmpl_idx, key);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to set rss key\n");
+ return -EFAULT;
}
- break;
- case SFF8024_ID_QSFP28_8636:
- modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
- break;
- default:
- netif_warn(nic_dev, drv, netdev,
- "Optical module unknown: 0x%x\n", sfp_type);
- return -EINVAL;
+
+ nicif_info(nic_dev, drv, netdev, "Change rss key success\n");
}
return 0;
}
-static int hinic_get_module_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *ee, u8 *data)
+static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
- u16 len;
- int err;
+ return HINIC_RSS_KEY_SIZE;
+}
- if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
- return -EINVAL;
+static int hinic_get_rxfh(struct net_device *netdev,
+ u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int err = 0;
- memset(data, 0, ee->len);
+ if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
+ return -EOPNOTSUPP;
- err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
- if (err)
- return err;
+ if (hfunc) {
+ u8 hash_engine_type = 0;
- memcpy(data, sfp_data + ee->offset, ee->len);
+ err = hinic_rss_get_hash_engine(nic_dev->hwdev,
+ nic_dev->rss_tmpl_idx,
+ &hash_engine_type);
+ if (err)
+ return -EFAULT;
- return 0;
+ *hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
+ }
+
+ if (indir) {
+ err = hinic_rss_get_indir_tbl(nic_dev->hwdev,
+ nic_dev->rss_tmpl_idx, indir);
+ if (err)
+ return -EFAULT;
+ }
+
+ if (key)
+ err = hinic_rss_get_template_tbl(nic_dev->hwdev,
+ nic_dev->rss_tmpl_idx, key);
+
+ return err;
}
-static int
-hinic_get_link_ext_state(struct net_device *netdev,
- struct ethtool_link_ext_state_info *link_ext_state_info)
+static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int err = 0;
+
+ if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Not support to set rss parameters when rss is disable\n");
+ return -EOPNOTSUPP;
+ }
- if (netif_carrier_ok(netdev))
- return -ENODATA;
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) && indir) {
+ nicif_err(nic_dev, drv, netdev,
+ "Not support to set indir when DCB is enabled\n");
+ return -EOPNOTSUPP;
+ }
- if (nic_dev->cable_unplugged)
- link_ext_state_info->link_ext_state =
- ETHTOOL_LINK_EXT_STATE_NO_CABLE;
- else if (nic_dev->module_unrecognized)
- link_ext_state_info->link_ext_state =
- ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH;
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
+ if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) {
+ nicif_err(nic_dev, drv, netdev,
+ "Not support to set hfunc type except TOP and XOR\n");
+ return -EOPNOTSUPP;
+ }
- return 0;
+ nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
+ HINIC_RSS_HASH_ENGINE_TYPE_XOR :
+ HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
+ err = hinic_rss_set_hash_engine
+ (nic_dev->hwdev, nic_dev->rss_tmpl_idx,
+ nic_dev->rss_hash_engine);
+ if (err)
+ return -EFAULT;
+
+ nicif_info(nic_dev, drv, netdev,
+ "Change hfunc to RSS_HASH_%s success\n",
+ (hfunc == ETH_RSS_HASH_XOR) ? "XOR" : "TOP");
+ }
+
+ err = __set_rss_rxfh(netdev, indir, key);
+
+ return err;
}
static const struct ethtool_ops hinic_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
- ETHTOOL_COALESCE_RX_MAX_FRAMES |
- ETHTOOL_COALESCE_TX_USECS |
- ETHTOOL_COALESCE_TX_MAX_FRAMES,
-
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_PKT_RATE_RX_USECS,
.get_link_ksettings = hinic_get_link_ksettings,
.set_link_ksettings = hinic_set_link_ksettings,
.get_drvinfo = hinic_get_drvinfo,
+ .get_msglevel = hinic_get_msglevel,
+ .set_msglevel = hinic_set_msglevel,
+ .nway_reset = hinic_nway_reset,
.get_link = ethtool_op_get_link,
- .get_link_ext_state = hinic_get_link_ext_state,
.get_ringparam = hinic_get_ringparam,
.set_ringparam = hinic_set_ringparam,
+ .get_pauseparam = hinic_get_pauseparam,
+ .set_pauseparam = hinic_set_pauseparam,
+ .get_sset_count = hinic_get_sset_count,
.get_coalesce = hinic_get_coalesce,
.set_coalesce = hinic_set_coalesce,
.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
- .get_pauseparam = hinic_get_pauseparam,
- .set_pauseparam = hinic_set_pauseparam,
- .get_channels = hinic_get_channels,
- .set_channels = hinic_set_channels,
- .get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
- .get_rxfh_key_size = hinic_get_rxfh_key_size,
- .get_rxfh_indir_size = hinic_get_rxfh_indir_size,
- .get_rxfh = hinic_get_rxfh,
- .set_rxfh = hinic_set_rxfh,
- .get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
- .self_test = hinic_diag_test,
.set_phys_id = hinic_set_phys_id,
+ .self_test = hinic_diag_test,
+ .get_rxnfc = hinic_get_rxnfc,
+ .set_rxnfc = hinic_set_rxnfc,
+ .get_channels = hinic_get_channels,
+ .set_channels = hinic_set_channels,
.get_module_info = hinic_get_module_info,
.get_module_eeprom = hinic_get_module_eeprom,
+ .get_rxfh_indir_size = hinic_get_rxfh_indir_size,
+ .get_rxfh_key_size = hinic_get_rxfh_key_size,
+ .get_rxfh = hinic_get_rxfh,
+ .set_rxfh = hinic_set_rxfh,
};
static const struct ethtool_ops hinicvf_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
- ETHTOOL_COALESCE_RX_MAX_FRAMES |
- ETHTOOL_COALESCE_TX_USECS |
- ETHTOOL_COALESCE_TX_MAX_FRAMES,
-
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_LOW_HIGH |
+ ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH |
+ ETHTOOL_COALESCE_PKT_RATE_RX_USECS,
.get_link_ksettings = hinic_get_link_ksettings,
.get_drvinfo = hinic_get_drvinfo,
+ .get_msglevel = hinic_get_msglevel,
+ .set_msglevel = hinic_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ringparam = hinic_get_ringparam,
.set_ringparam = hinic_set_ringparam,
+ .get_sset_count = hinic_get_sset_count,
.get_coalesce = hinic_get_coalesce,
.set_coalesce = hinic_set_coalesce,
.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
- .get_channels = hinic_get_channels,
- .set_channels = hinic_set_channels,
+ .get_ethtool_stats = hinic_get_ethtool_stats,
+ .get_strings = hinic_get_strings,
.get_rxnfc = hinic_get_rxnfc,
.set_rxnfc = hinic_set_rxnfc,
- .get_rxfh_key_size = hinic_get_rxfh_key_size,
+
+ .get_channels = hinic_get_channels,
+ .set_channels = hinic_set_channels,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
+ .get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
- .get_sset_count = hinic_get_sset_count,
- .get_ethtool_stats = hinic_get_ethtool_stats,
- .get_strings = hinic_get_strings,
};
void hinic_set_ethtool_ops(struct net_device *netdev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
-
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- netdev->ethtool_ops = &hinic_ethtool_ops;
- else
- netdev->ethtool_ops = &hinicvf_ethtool_ops;
+ SET_ETHTOOL_OPS(netdev, &hinic_ethtool_ops);
}
+
+void hinicvf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &hinicvf_ethtool_ops);
+} /*lint -e766*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw.h b/drivers/net/ethernet/huawei/hinic/hinic_hw.h
new file mode 100644
index 000000000000..9f50bc27a856
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw.h
@@ -0,0 +1,770 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_HW_H_
+#define HINIC_HW_H_
+
+enum hinic_mod_type {
+ HINIC_MOD_COMM = 0, /* HW communication module */
+ HINIC_MOD_L2NIC = 1, /* L2NIC module */
+ HINIC_MOD_ROCE = 2,
+ HINIC_MOD_IWARP = 3,
+ HINIC_MOD_TOE = 4,
+ HINIC_MOD_FLR = 5,
+ HINIC_MOD_FCOE = 6,
+ HINIC_MOD_CFGM = 7, /* Configuration module */
+ HINIC_MOD_CQM = 8,
+ HINIC_MOD_VSWITCH = 9,
+ HINIC_MOD_FC = 10,
+ HINIC_MOD_OVS = 11,
+ HINIC_MOD_FIC = 12,
+ HINIC_MOD_MIGRATE = 13,
+ HINIC_MOD_HILINK = 14,
+ HINIC_MOD_HW_MAX = 16, /* hardware max module id */
+
+ /* Software module id, for PF/VF and multi-host */
+ HINIC_MOD_SW_FUNC = 17,
+ HINIC_MOD_MAX,
+};
+
+struct hinic_cmd_buf {
+ void *buf;
+ dma_addr_t dma_addr;
+ u16 size;
+};
+
+enum hinic_ack_type {
+ HINIC_ACK_TYPE_CMDQ,
+ HINIC_ACK_TYPE_SHARE_CQN,
+ HINIC_ACK_TYPE_APP_CQN,
+
+ HINIC_MOD_ACK_MAX = 15,
+
+};
+
+#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF
+
+int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+
+/* for pxe, ovs */
+int hinic_msg_to_mgmt_poll_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+
+/* PF/VF send msg to uP by api cmd, and return immediately */
+int hinic_msg_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size);
+
+int hinic_mbox_to_vf(void *hwdev, enum hinic_mod_type mod,
+ u16 vf_id, u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+
+int hinic_api_cmd_write_nack(void *hwdev, u8 dest,
+ void *cmd, u16 size);
+
+int hinic_api_cmd_read_ack(void *hwdev, u8 dest,
+ void *cmd, u16 size, void *ack, u16 ack_size);
+/* PF/VF send cmd to ucode by cmdq, and return if success.
+ * timeout=0, use default timeout.
+ */
+int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ u64 *out_param, u32 timeout);
+/* 1. whether need the timeout parameter
+ * 2. out_param indicates the status of the microcode processing command
+ */
+
+/* PF/VF send cmd to ucode by cmdq, and return detailed result.
+ * timeout=0, use default timeout.
+ */
+int hinic_cmdq_detail_resp(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in,
+ struct hinic_cmd_buf *buf_out, u32 timeout);
+
+/* PF/VF send cmd to ucode by cmdq, and return immediately */
+int hinic_cmdq_async(void *hwdev, enum hinic_ack_type ack_type,
+ enum hinic_mod_type mod, u8 cmd,
+ struct hinic_cmd_buf *buf_in);
+
+int hinic_ppf_tmr_start(void *hwdev);
+int hinic_ppf_tmr_stop(void *hwdev);
+
+/* CLP */
+int hinic_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+/* FOR windows */
+bool hinic_eq_intr_handler(void *hwdev, int msix_entry_idx);
+
+enum hinic_ceq_event {
+ HINIC_NON_L2NIC_SCQ,
+ HINIC_NON_L2NIC_ECQ,
+ HINIC_NON_L2NIC_NO_CQ_EQ,
+ HINIC_CMDQ,
+ HINIC_L2NIC_SQ,
+ HINIC_L2NIC_RQ,
+ HINIC_MAX_CEQ_EVENTS,
+};
+
+typedef void (*hinic_ceq_event_cb)(void *handle, u32 ceqe_data);
+int hinic_ceq_register_cb(void *hwdev, enum hinic_ceq_event event,
+ hinic_ceq_event_cb callback);
+void hinic_ceq_unregister_cb(void *hwdev, enum hinic_ceq_event event);
+
+enum hinic_aeq_type {
+ HINIC_HW_INTER_INT = 0,
+ HINIC_MBX_FROM_FUNC = 1,
+ HINIC_MSG_FROM_MGMT_CPU = 2,
+ HINIC_API_RSP = 3,
+ HINIC_API_CHAIN_STS = 4,
+ HINIC_MBX_SEND_RSLT = 5,
+ HINIC_MAX_AEQ_EVENTS
+};
+
+enum hinic_aeq_sw_type {
+ HINIC_STATELESS_EVENT = 0,
+ HINIC_STATEFULL_EVENT = 1,
+ HINIC_MAX_AEQ_SW_EVENTS
+};
+
+typedef void (*hinic_aeq_hwe_cb)(void *handle, u8 *data, u8 size);
+int hinic_aeq_register_hw_cb(void *hwdev, enum hinic_aeq_type event,
+ hinic_aeq_hwe_cb hwe_cb);
+void hinic_aeq_unregister_hw_cb(void *hwdev, enum hinic_aeq_type event);
+
+typedef u8 (*hinic_aeq_swe_cb)(void *handle, u8 event, u64 data);
+int hinic_aeq_register_swe_cb(void *hwdev, enum hinic_aeq_sw_type event,
+ hinic_aeq_swe_cb aeq_swe_cb);
+void hinic_aeq_unregister_swe_cb(void *hwdev, enum hinic_aeq_sw_type event);
+
+typedef void (*hinic_mgmt_msg_cb)(void *hwdev, void *pri_handle,
+ u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+int hinic_register_mgmt_msg_cb(void *hwdev,
+ enum hinic_mod_type mod, void *pri_handle,
+ hinic_mgmt_msg_cb callback);
+void hinic_unregister_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod);
+
+struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev);
+void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *buf);
+
+int hinic_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base);
+void hinic_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base);
+int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base);
+void hinic_free_db_addr(void *hwdev, void __iomem *db_base,
+ void __iomem *dwqe_base);
+
+struct nic_interrupt_info {
+ u32 lli_set;
+ u32 interrupt_coalesc_set;
+ u16 msix_index;
+ u8 lli_credit_limit;
+ u8 lli_timer_cfg;
+ u8 pending_limt;
+ u8 coalesc_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+int hinic_get_interrupt_cfg(void *hwdev,
+ struct nic_interrupt_info *interrupt_info);
+int hinic_set_interrupt_cfg_direct(void *hwdev,
+ struct nic_interrupt_info *interrupt_info);
+int hinic_set_interrupt_cfg(void *hwdev,
+ struct nic_interrupt_info interrupt_info);
+
+/* The driver code implementation interface */
+void hinic_misx_intr_clear_resend_bit(void *hwdev,
+ u16 msix_idx, u8 clear_resend_en);
+
+struct hinic_sq_attr {
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u64 ci_dma_base;
+};
+
+int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr);
+
+int hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz);
+int hinic_clean_root_ctxt(void *hwdev);
+void hinic_record_pcie_error(void *hwdev);
+
+int hinic_func_rx_tx_flush(void *hwdev);
+
+int hinic_func_tmr_bitmap_set(void *hwdev, bool enable);
+
+struct hinic_init_para {
+ /* Record hinic_pcidev or NDIS_Adapter pointer address */
+ void *adapter_hdl;
+ /* Record pcidev or Handler pointer address
+ * for example: ioremap interface input parameter
+ */
+ void *pcidev_hdl;
+ /* Record pcidev->dev or Handler pointer address which used to
+ * dma address application or dev_err print the parameter
+ */
+ void *dev_hdl;
+
+ void *cfg_reg_base; /* Configure virtual address, bar0/1 */
+ /* interrupt configuration register address, bar2/3 */
+ void *intr_reg_base;
+ u64 db_base_phy;
+ void *db_base; /* the doorbell address, bar4/5 higher 4M space */
+ void *dwqe_mapping; /* direct wqe 4M, follow the doorbell address */
+ void **hwdev;
+ void *chip_node;
+ /* In bmgw x86 host, driver can't send message to mgmt cpu directly,
+ * need to transmit message via ppf mbox to bmgw arm host.
+ */
+ void *ppf_hwdev;
+};
+
+#define MAX_FUNCTION_NUM 512
+#define HINIC_MAX_PF_NUM 16
+#define HINIC_MAX_COS 8
+#define INIT_FAILED 0
+#define INIT_SUCCESS 1
+#define MAX_DRV_BUF_SIZE 4096
+
+struct hinic_cmd_get_light_module_abs {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+ u8 abs_status; /* 0:present, 1:absent */
+ u8 rsv[2];
+};
+
+#define MODULE_TYPE_SFP 0x3
+#define MODULE_TYPE_QSFP28 0x11
+#define MODULE_TYPE_QSFP 0x0C
+#define MODULE_TYPE_QSFP_PLUS 0x0D
+
+#define SFP_INFO_MAX_SIZE 512
+struct hinic_cmd_get_sfp_qsfp_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+ u8 wire_type;
+ u16 out_len;
+ u8 sfp_qsfp_info[SFP_INFO_MAX_SIZE];
+};
+
+#define STD_SFP_INFO_MAX_SIZE 640
+struct hinic_cmd_get_std_sfp_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+ u8 wire_type;
+ u16 eeprom_len;
+ u32 rsvd;
+ u8 sfp_info[STD_SFP_INFO_MAX_SIZE];
+};
+
+#define HINIC_MAX_PORT_ID 4
+
+struct hinic_port_routine_cmd {
+ int up_send_sfp_info;
+ int up_send_sfp_abs;
+
+ struct hinic_cmd_get_sfp_qsfp_info sfp_info;
+ struct hinic_cmd_get_light_module_abs abs;
+};
+
+struct card_node {
+ struct list_head node;
+ struct list_head func_list;
+ char chip_name[IFNAMSIZ];
+ void *log_info;
+ void *dbgtool_info;
+ void *func_handle_array[MAX_FUNCTION_NUM];
+ unsigned char dp_bus_num;
+ u8 func_num;
+ struct attribute dbgtool_attr_file;
+
+ bool cos_up_setted;
+ u8 cos_up[HINIC_MAX_COS];
+ bool ppf_state;
+ u8 pf_bus_num[HINIC_MAX_PF_NUM];
+ bool disable_vf_load[HINIC_MAX_PF_NUM];
+ u32 vf_mbx_old_rand_id[MAX_FUNCTION_NUM];
+ u32 vf_mbx_rand_id[MAX_FUNCTION_NUM];
+ struct hinic_port_routine_cmd rt_cmd[HINIC_MAX_PORT_ID];
+
+ /* mutex used for copy sfp info */
+ struct mutex sfp_mutex;
+};
+
+enum hinic_hwdev_init_state {
+ HINIC_HWDEV_NONE_INITED = 0,
+ HINIC_HWDEV_CLP_INITED,
+ HINIC_HWDEV_AEQ_INITED,
+ HINIC_HWDEV_MGMT_INITED,
+ HINIC_HWDEV_MBOX_INITED,
+ HINIC_HWDEV_CMDQ_INITED,
+ HINIC_HWDEV_COMM_CH_INITED,
+ HINIC_HWDEV_ALL_INITED,
+ HINIC_HWDEV_MAX_INVAL_INITED
+};
+
+enum hinic_func_mode {
+ FUNC_MOD_NORMAL_HOST,
+ FUNC_MOD_MULTI_BM_MASTER,
+ FUNC_MOD_MULTI_BM_SLAVE,
+ FUNC_MOD_MULTI_VM_MASTER,
+ FUNC_MOD_MULTI_VM_SLAVE,
+};
+
+enum hinic_func_cap {
+ /* send message to mgmt cpu directly */
+ HINIC_FUNC_MGMT = 1 << 0,
+ /* setting port attribute, pause/speed etc. */
+ HINIC_FUNC_PORT = 1 << 1,
+ /* Enable SR-IOV in default */
+ HINIC_FUNC_SRIOV_EN_DFLT = 1 << 2,
+ /* Can't change VF num */
+ HINIC_FUNC_SRIOV_NUM_FIX = 1 << 3,
+ /* Force pf/vf link up */
+ HINIC_FUNC_FORCE_LINK_UP = 1 << 4,
+ /* Support rate limit */
+ HINIC_FUNC_SUPP_RATE_LIMIT = 1 << 5,
+ HINIC_FUNC_SUPP_DFX_REG = 1 << 6,
+ /* Support promisc/multicast/all-multi */
+ HINIC_FUNC_SUPP_RX_MODE = 1 << 7,
+ /* Set vf mac and vlan by ip link */
+ HINIC_FUNC_SUPP_SET_VF_MAC_VLAN = 1 << 8,
+ /* Support set mac by ifconfig */
+ HINIC_FUNC_SUPP_CHANGE_MAC = 1 << 9,
+ /* OVS doesn't support SCTP_CRC/HW_VLAN/LRO */
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP = 1 << 10,
+ /* OVS doesn't support encap-tso/encap-csum */
+ HINIC_FUNC_SUPP_ENCAP_TSO_CSUM = 1 << 11,
+};
+
+#define FUNC_SUPPORT_MGMT(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & HINIC_FUNC_MGMT))
+#define FUNC_SUPPORT_PORT_SETTING(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & HINIC_FUNC_PORT))
+#define FUNC_SUPPORT_DCB(hwdev) \
+ (FUNC_SUPPORT_PORT_SETTING(hwdev))
+#define FUNC_ENABLE_SRIOV_IN_DEFAULT(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SRIOV_EN_DFLT))
+#define FUNC_SRIOV_FIX_NUM_VF(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SRIOV_NUM_FIX))
+#define FUNC_SUPPORT_RX_MODE(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SUPP_RX_MODE))
+#define FUNC_SUPPORT_RATE_LIMIT(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SUPP_RATE_LIMIT))
+#define FUNC_SUPPORT_SET_VF_MAC_VLAN(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SUPP_SET_VF_MAC_VLAN))
+#define FUNC_SUPPORT_CHANGE_MAC(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SUPP_CHANGE_MAC))
+#define FUNC_FORCE_LINK_UP(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_FORCE_LINK_UP))
+#define FUNC_SUPPORT_SCTP_CRC(hwdev) \
+ (!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP))
+#define FUNC_SUPPORT_HW_VLAN(hwdev) \
+ (!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP))
+#define FUNC_SUPPORT_LRO(hwdev) \
+ (!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP))
+#define FUNC_SUPPORT_ENCAP_TSO_CSUM(hwdev) \
+ (!!(hinic_get_func_feature_cap(hwdev) & \
+ HINIC_FUNC_SUPP_ENCAP_TSO_CSUM))
+
+int hinic_init_hwdev(struct hinic_init_para *para);
+int hinic_set_vf_dev_cap(void *hwdev);
+void hinic_free_hwdev(void *hwdev);
+void hinic_shutdown_hwdev(void *hwdev);
+void hinic_set_api_stop(void *hwdev);
+
+void hinic_ppf_hwdev_unreg(void *hwdev);
+void hinic_ppf_hwdev_reg(void *hwdev, void *ppf_hwdev);
+
+void hinic_qps_num_set(void *hwdev, u32 num_qps);
+
+bool hinic_is_hwdev_mod_inited(void *hwdev, enum hinic_hwdev_init_state state);
+enum hinic_func_mode hinic_get_func_mode(void *hwdev);
+u64 hinic_get_func_feature_cap(void *hwdev);
+
+enum hinic_service_mode {
+ HINIC_WORK_MODE_OVS = 0,
+ HINIC_WORK_MODE_UNKNOWN,
+ HINIC_WORK_MODE_NIC,
+ HINIC_WORK_MODE_INVALID = 0xFF,
+};
+
+enum hinic_service_mode hinic_get_service_mode(void *hwdev);
+
+int hinic_slq_init(void *dev, int num_wqs);
+void hinic_slq_uninit(void *dev);
+int hinic_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth,
+ u16 page_size, u64 *cla_addr, void **handle);
+void hinic_slq_free(void *dev, void *handle);
+u64 hinic_slq_get_addr(void *handle, u16 index);
+u64 hinic_slq_get_first_pageaddr(void *handle);
+
+typedef void (*comm_up_self_msg_proc)(void *handle, void *buf_in,
+ u16 in_size, void *buf_out,
+ u16 *out_size);
+
+void hinic_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd,
+ comm_up_self_msg_proc proc);
+
+void hinic_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd);
+
+int hinic_micro_log_path_set(void *hwdev, u8 *log_path);
+int hinic_micro_log_func_en(void *hwdev, u8 is_en);
+
+/* defined by chip */
+enum hinic_fault_type {
+ FAULT_TYPE_CHIP,
+ FAULT_TYPE_UCODE,
+ FAULT_TYPE_MEM_RD_TIMEOUT,
+ FAULT_TYPE_MEM_WR_TIMEOUT,
+ FAULT_TYPE_REG_RD_TIMEOUT,
+ FAULT_TYPE_REG_WR_TIMEOUT,
+ FAULT_TYPE_PHY_FAULT,
+ FAULT_TYPE_MAX,
+};
+
+/* defined by chip */
+enum hinic_fault_err_level {
+ /* default err_level=FAULT_LEVEL_FATAL if
+ * type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT ||
+ * FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT ||
+ * FAULT_TYPE_UCODE
+ * other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP
+ */
+ FAULT_LEVEL_FATAL,
+ FAULT_LEVEL_SERIOUS_RESET,
+ FAULT_LEVEL_SERIOUS_FLR,
+ FAULT_LEVEL_GENERAL,
+ FAULT_LEVEL_SUGGESTION,
+ FAULT_LEVEL_MAX
+};
+
+enum hinic_fault_source_type {
+ /* same as FAULT_TYPE_CHIP */
+ HINIC_FAULT_SRC_HW_MGMT_CHIP = 0,
+ /* same as FAULT_TYPE_UCODE */
+ HINIC_FAULT_SRC_HW_MGMT_UCODE,
+ /* same as FAULT_TYPE_MEM_RD_TIMEOUT */
+ HINIC_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT,
+ /* same as FAULT_TYPE_MEM_WR_TIMEOUT */
+ HINIC_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT,
+ /* same as FAULT_TYPE_REG_RD_TIMEOUT */
+ HINIC_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT,
+ /* same as FAULT_TYPE_REG_WR_TIMEOUT */
+ HINIC_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT,
+ HINIC_FAULT_SRC_SW_MGMT_UCODE,
+ HINIC_FAULT_SRC_MGMT_WATCHDOG,
+ HINIC_FAULT_SRC_MGMT_RESET = 8,
+ HINIC_FAULT_SRC_HW_PHY_FAULT,
+ HINIC_FAULT_SRC_HOST_HEARTBEAT_LOST = 20,
+ HINIC_FAULT_SRC_TYPE_MAX,
+};
+
+struct hinic_fault_sw_mgmt {
+ u8 event_id;
+ u64 event_data;
+};
+
+union hinic_fault_hw_mgmt {
+ u32 val[4];
+ /* valid only type==FAULT_TYPE_CHIP */
+ struct {
+ u8 node_id;
+ /* enum hinic_fault_err_level */
+ u8 err_level;
+ u16 err_type;
+ u32 err_csr_addr;
+ u32 err_csr_value;
+ /* func_id valid only err_level==FAULT_LEVEL_SERIOUS_FLR */
+ u16 func_id;
+ u16 rsvd2;
+ } chip;
+
+ /* valid only type==FAULT_TYPE_UCODE */
+ struct {
+ u8 cause_id;
+ u8 core_id;
+ u8 c_id;
+ u8 rsvd3;
+ u32 epc;
+ u32 rsvd4;
+ u32 rsvd5;
+ } ucode;
+
+ /* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT ||
+ * FAULT_TYPE_MEM_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr_ctrl;
+ u32 err_csr_data;
+ u32 ctrl_tab;
+ u32 mem_index;
+ } mem_timeout;
+
+ /* valid only type==FAULT_TYPE_REG_RD_TIMEOUT ||
+ * FAULT_TYPE_REG_WR_TIMEOUT
+ */
+ struct {
+ u32 err_csr;
+ u32 rsvd6;
+ u32 rsvd7;
+ u32 rsvd8;
+ } reg_timeout;
+
+ struct {
+ /* 0: read; 1: write */
+ u8 op_type;
+ u8 port_id;
+ u8 dev_ad;
+ u8 rsvd9;
+ u32 csr_addr;
+ u32 op_data;
+ u32 rsvd10;
+ } phy_fault;
+};
+
+/* defined by chip */
+struct hinic_fault_event {
+ /* enum hinic_fault_type */
+ u8 type;
+ u8 fault_level; /* sdk write fault level for uld event */
+ u8 rsvd0[2];
+ union hinic_fault_hw_mgmt event;
+};
+
+struct hinic_dcb_state {
+ u8 dcb_on;
+ u8 default_cos;
+ u8 up_cos[8];
+};
+
+enum link_err_type {
+ LINK_ERR_MODULE_UNRECOGENIZED,
+ LINK_ERR_NUM,
+};
+
+enum port_module_event_type {
+ HINIC_PORT_MODULE_CABLE_PLUGGED,
+ HINIC_PORT_MODULE_CABLE_UNPLUGGED,
+ HINIC_PORT_MODULE_LINK_ERR,
+ HINIC_PORT_MODULE_MAX_EVENT,
+};
+
+struct hinic_port_module_event {
+ enum port_module_event_type type;
+ enum link_err_type err_type;
+};
+
+struct hinic_event_link_info {
+ u8 valid;
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+};
+
+struct hinic_mctp_host_info {
+ u8 major_cmd;
+ u8 sub_cmd;
+ u8 rsvd[2];
+
+ u32 data_len;
+ void *data;
+};
+
+/* multi host mgmt event sub cmd */
+enum hinic_mhost_even_type {
+ HINIC_MHOST_NIC_STATE_CHANGE = 1,
+};
+
+struct hinic_mhost_nic_func_state {
+ u8 status;
+
+ u8 enable;
+ u16 func_idx;
+};
+
+struct hinic_multi_host_mgmt_event {
+ u16 sub_cmd;
+ u16 rsvd[3];
+
+ void *data;
+};
+
+enum hinic_event_type {
+ HINIC_EVENT_LINK_DOWN = 0,
+ HINIC_EVENT_LINK_UP = 1,
+ HINIC_EVENT_HEART_LOST = 2,
+ HINIC_EVENT_FAULT = 3,
+ HINIC_EVENT_NOTIFY_VF_DCB_STATE = 4,
+ HINIC_EVENT_DCB_STATE_CHANGE = 5,
+ HINIC_EVENT_FMW_ACT_NTC = 6,
+ HINIC_EVENT_PORT_MODULE_EVENT = 7,
+ HINIC_EVENT_MCTP_GET_HOST_INFO,
+ HINIC_EVENT_MULTI_HOST_MGMT,
+ HINIC_EVENT_INIT_MIGRATE_PF,
+ HINIC_EVENT_MGMT_WATCHDOG_EVENT,
+};
+
+struct hinic_event_info {
+ enum hinic_event_type type;
+ union {
+ struct hinic_event_link_info link_info;
+ struct hinic_fault_event info;
+ struct hinic_dcb_state dcb_state;
+ struct hinic_port_module_event module_event;
+ u8 vf_default_cos;
+ struct hinic_mctp_host_info mctp_info;
+ struct hinic_multi_host_mgmt_event mhost_mgmt;
+ };
+};
+
+enum hinic_ucode_event_type {
+ HINIC_INTERNAL_TSO_FATAL_ERROR = 0x0,
+ HINIC_INTERNAL_LRO_FATAL_ERROR = 0x1,
+ HINIC_INTERNAL_TX_FATAL_ERROR = 0x2,
+ HINIC_INTERNAL_RX_FATAL_ERROR = 0x3,
+ HINIC_INTERNAL_OTHER_FATAL_ERROR = 0x4,
+ HINIC_NIC_FATAL_ERROR_MAX = 0x8,
+};
+
+typedef void (*hinic_event_handler)(void *handle,
+ struct hinic_event_info *event);
+
+/* only register once */
+void hinic_event_register(void *dev, void *pri_handle,
+ hinic_event_handler callback);
+void hinic_event_unregister(void *dev);
+
+void hinic_detect_hw_present(void *hwdev);
+
+void hinic_set_chip_absent(void *hwdev);
+
+int hinic_get_chip_present_flag(void *hwdev);
+
+void hinic_set_pcie_order_cfg(void *handle);
+
+int hinic_get_mgmt_channel_status(void *handle);
+
+enum hinic_led_mode {
+ HINIC_LED_MODE_ON,
+ HINIC_LED_MODE_OFF,
+ HINIC_LED_MODE_FORCE_1HZ,
+ HINIC_LED_MODE_FORCE_2HZ,
+ HINIC_LED_MODE_FORCE_4HZ,
+ HINIC_LED_MODE_1HZ,
+ HINIC_LED_MODE_2HZ,
+ HINIC_LED_MODE_4HZ,
+ HINIC_LED_MODE_INVALID,
+};
+
+enum hinic_led_type {
+ HINIC_LED_TYPE_LINK,
+ HINIC_LED_TYPE_LOW_SPEED,
+ HINIC_LED_TYPE_HIGH_SPEED,
+ HINIC_LED_TYPE_INVALID,
+};
+
+int hinic_reset_led_status(void *hwdev, u8 port);
+int hinic_set_led_status(void *hwdev, u8 port, enum hinic_led_type type,
+ enum hinic_led_mode mode);
+
+struct hinic_board_info {
+ u32 board_type;
+ u32 port_num;
+ u32 port_speed;
+ u32 pcie_width;
+ u32 host_num;
+ u32 pf_num;
+ u32 vf_total_num;
+ u32 tile_num;
+ u32 qcm_num;
+ u32 core_num;
+ u32 work_mode;
+ u32 service_mode;
+ u32 pcie_mode;
+ u32 cfg_addr;
+ u32 boot_sel;
+ u32 board_id;
+};
+
+int hinic_get_board_info(void *hwdev, struct hinic_board_info *info);
+bool hinic_get_ppf_status(void *hwdev);
+
+struct hw_pf_info {
+ u16 glb_func_idx;
+ u16 glb_pf_vf_offset;
+ u8 p2p_idx;
+ u8 itf_idx;
+ u16 max_vfs;
+ u16 max_queue_num;
+ u16 ovs_q_vf_num[9];
+ u32 resv;
+};
+
+struct hinic_hw_pf_infos {
+ u8 num_pfs;
+ u8 rsvd1[3];
+
+ struct hw_pf_info infos[16];
+};
+
+int hinic_get_hw_pf_infos(void *hwdev, struct hinic_hw_pf_infos *infos);
+int hinic_set_ip_check(void *hwdev, bool ip_check_ctl);
+int hinic_mbox_to_host_sync(void *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+int hinic_mbox_ppf_to_vf(void *hwdev, enum hinic_mod_type mod, u16 func_id,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_get_card_present_state(void *hwdev, bool *card_present_state);
+
+void hinic_migrate_report(void *dev);
+int hinic_set_vxlan_udp_dport(void *hwdev, u32 udp_port);
+bool is_multi_vm_slave(void *hwdev);
+bool is_multi_bm_slave(void *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
deleted file mode 100644
index 998717f02136..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ /dev/null
@@ -1,987 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
-#include <linux/err.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/log2.h>
-#include <linux/semaphore.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_api_cmd.h"
-
-#define API_CHAIN_NUM_CELLS 32
-
-#define API_CMD_CELL_SIZE_SHIFT 6
-#define API_CMD_CELL_SIZE_MIN (BIT(API_CMD_CELL_SIZE_SHIFT))
-
-#define API_CMD_CELL_SIZE(cell_size) \
- (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? \
- (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN)
-
-#define API_CMD_CELL_SIZE_VAL(size) \
- ilog2((size) >> API_CMD_CELL_SIZE_SHIFT)
-
-#define API_CMD_BUF_SIZE 2048
-
-/* Sizes of the members in hinic_api_cmd_cell */
-#define API_CMD_CELL_DESC_SIZE 8
-#define API_CMD_CELL_DATA_ADDR_SIZE 8
-
-#define API_CMD_CELL_ALIGNMENT 8
-
-#define API_CMD_TIMEOUT 1000
-
-#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1))
-
-#define SIZE_8BYTES(size) (ALIGN((size), 8) >> 3)
-#define SIZE_4BYTES(size) (ALIGN((size), 4) >> 2)
-
-#define RD_DMA_ATTR_DEFAULT 0
-#define WR_DMA_ATTR_DEFAULT 0
-
-enum api_cmd_data_format {
- SGE_DATA = 1, /* cell data is passed by hw address */
-};
-
-enum api_cmd_type {
- API_CMD_WRITE = 0,
-};
-
-enum api_cmd_bypass {
- NO_BYPASS = 0,
- BYPASS = 1,
-};
-
-enum api_cmd_xor_chk_level {
- XOR_CHK_DIS = 0,
-
- XOR_CHK_ALL = 3,
-};
-
-static u8 xor_chksum_set(void *data)
-{
- int idx;
- u8 *val, checksum = 0;
-
- val = data;
-
- for (idx = 0; idx < 7; idx++)
- checksum ^= val[idx];
-
- return checksum;
-}
-
-static void set_prod_idx(struct hinic_api_cmd_chain *chain)
-{
- enum hinic_api_cmd_chain_type chain_type = chain->chain_type;
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, prod_idx;
-
- addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type);
- prod_idx = hinic_hwif_read_reg(hwif, addr);
-
- prod_idx = HINIC_API_CMD_PI_CLEAR(prod_idx, IDX);
-
- prod_idx |= HINIC_API_CMD_PI_SET(chain->prod_idx, IDX);
-
- hinic_hwif_write_reg(hwif, addr, prod_idx);
-}
-
-static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain)
-{
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type);
- val = hinic_hwif_read_reg(chain->hwif, addr);
-
- return HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
-}
-
-static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain)
-{
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type);
- val = hinic_hwif_read_reg(chain->hwif, addr);
-
- dev_err(&chain->hwif->pdev->dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n",
- chain->chain_type, HINIC_API_CMD_STATUS_GET(val, CPLD_ERR),
- HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR),
- HINIC_API_CMD_STATUS_GET(val, FSM));
-
- dev_err(&chain->hwif->pdev->dev, "Chain hw current ci: 0x%x\n",
- HINIC_API_CMD_STATUS_GET(val, CONS_IDX));
-
- addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type);
- val = hinic_hwif_read_reg(chain->hwif, addr);
- dev_err(&chain->hwif->pdev->dev, "Chain hw current pi: 0x%x\n", val);
-}
-
-/**
- * chain_busy - check if the chain is still processing last requests
- * @chain: chain to check
- *
- * Return 0 - Success, negative - Failure
- **/
-static int chain_busy(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u32 prod_idx;
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- chain->cons_idx = get_hw_cons_idx(chain);
- prod_idx = chain->prod_idx;
-
- /* check for a space for a new command */
- if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) {
- dev_err(&pdev->dev, "API CMD chain %d is busy, cons_idx: %d, prod_idx: %d\n",
- chain->chain_type, chain->cons_idx,
- chain->prod_idx);
- dump_api_chain_reg(chain);
- return -EBUSY;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unknown API CMD Chain type\n");
- break;
- }
-
- return 0;
-}
-
-/**
- * get_cell_data_size - get the data size of a specific cell type
- * @type: chain type
- *
- * Return the data(Desc + Address) size in the cell
- **/
-static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type)
-{
- u8 cell_data_size = 0;
-
- switch (type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
- API_CMD_CELL_DATA_ADDR_SIZE,
- API_CMD_CELL_ALIGNMENT);
- break;
- default:
- break;
- }
-
- return cell_data_size;
-}
-
-/**
- * prepare_cell_ctrl - prepare the ctrl of the cell for the command
- * @cell_ctrl: the control of the cell to set the control value into it
- * @data_size: the size of the data in the cell
- **/
-static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size)
-{
- u8 chksum;
- u64 ctrl;
-
- ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(data_size), DATA_SZ) |
- HINIC_API_CMD_CELL_CTRL_SET(RD_DMA_ATTR_DEFAULT, RD_DMA_ATTR) |
- HINIC_API_CMD_CELL_CTRL_SET(WR_DMA_ATTR_DEFAULT, WR_DMA_ATTR);
-
- chksum = xor_chksum_set(&ctrl);
-
- ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM);
-
- /* The data in the HW should be in Big Endian Format */
- *cell_ctrl = cpu_to_be64(ctrl);
-}
-
-/**
- * prepare_api_cmd - prepare API CMD command
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @cmd_size: the command size
- **/
-static void prepare_api_cmd(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest,
- void *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell *cell = chain->curr_node;
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cell_ctxt = &chain->cell_ctxt[chain->prod_idx];
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- cell->desc = HINIC_API_CMD_DESC_SET(SGE_DATA, API_TYPE) |
- HINIC_API_CMD_DESC_SET(API_CMD_WRITE, RD_WR) |
- HINIC_API_CMD_DESC_SET(NO_BYPASS, MGMT_BYPASS);
- break;
-
- default:
- dev_err(&pdev->dev, "unknown Chain type\n");
- return;
- }
-
- cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) |
- HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);
-
- cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc),
- XOR_CHKSUM);
-
- /* The data in the HW should be in Big Endian Format */
- cell->desc = cpu_to_be64(cell->desc);
-
- memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
-}
-
-/**
- * prepare_cell - prepare cell ctrl and cmd in the current cell
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @cmd_size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-static void prepare_cell(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest,
- void *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell *curr_node = chain->curr_node;
- u16 data_size = get_cell_data_size(chain->chain_type);
-
- prepare_cell_ctrl(&curr_node->ctrl, data_size);
- prepare_api_cmd(chain, dest, cmd, cmd_size);
-}
-
-static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain)
-{
- chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
-}
-
-/**
- * api_cmd_status_update - update the status in the chain struct
- * @chain: chain to update
- **/
-static void api_cmd_status_update(struct hinic_api_cmd_chain *chain)
-{
- enum hinic_api_cmd_chain_type chain_type;
- struct hinic_api_cmd_status *wb_status;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u64 status_header;
- u32 status;
-
- wb_status = chain->wb_status;
- status_header = be64_to_cpu(wb_status->header);
-
- status = be32_to_cpu(wb_status->status);
- if (HINIC_API_CMD_STATUS_GET(status, CHKSUM_ERR)) {
- dev_err(&pdev->dev, "API CMD status: Xor check error\n");
- return;
- }
-
- chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
- if (chain_type >= HINIC_API_CMD_MAX) {
- dev_err(&pdev->dev, "unknown API CMD Chain %d\n", chain_type);
- return;
- }
-
- chain->cons_idx = HINIC_API_CMD_STATUS_GET(status, CONS_IDX);
-}
-
-/**
- * wait_for_status_poll - wait for write to api cmd command to complete
- * @chain: the chain of the command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wait_for_status_poll(struct hinic_api_cmd_chain *chain)
-{
- int err = -ETIMEDOUT;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
- do {
- api_cmd_status_update(chain);
-
- /* wait for CI to be updated - sign for completion */
- if (chain->cons_idx == chain->prod_idx) {
- err = 0;
- break;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- return err;
-}
-
-/**
- * wait_for_api_cmd_completion - wait for command to complete
- * @chain: chain for the command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- err = wait_for_status_poll(chain);
- if (err) {
- dev_err(&pdev->dev, "API CMD Poll status timeout\n");
- dump_api_chain_reg(chain);
- break;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "unknown API CMD Chain type\n");
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-/**
- * api_cmd - API CMD command
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @cmd_size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell_ctxt *ctxt;
- int err;
-
- down(&chain->sem);
- if (chain_busy(chain)) {
- up(&chain->sem);
- return -EBUSY;
- }
-
- prepare_cell(chain, dest, cmd, cmd_size);
- cmd_chain_prod_idx_inc(chain);
-
- wmb(); /* inc pi before issue the command */
-
- set_prod_idx(chain); /* issue the command */
-
- ctxt = &chain->cell_ctxt[chain->prod_idx];
-
- chain->curr_node = ctxt->cell_vaddr;
-
- err = wait_for_api_cmd_completion(chain);
-
- up(&chain->sem);
- return err;
-}
-
-/**
- * hinic_api_cmd_write - Write API CMD command
- * @chain: chain for write command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 size)
-{
- /* Verify the chain type */
- if (chain->chain_type == HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- return api_cmd(chain, dest, cmd, size);
-
- return -EINVAL;
-}
-
-/**
- * api_cmd_hw_restart - restart the chain in the HW
- * @chain: the API CMD specific chain to restart
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- int err = -ETIMEDOUT;
- unsigned long end;
- u32 reg_addr, val;
-
- /* Read Modify Write */
- reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type);
- val = hinic_hwif_read_reg(hwif, reg_addr);
-
- val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART);
- val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART);
-
- hinic_hwif_write_reg(hwif, reg_addr, val);
-
- end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
- do {
- val = hinic_hwif_read_reg(hwif, reg_addr);
-
- if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) {
- err = 0;
- break;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- return err;
-}
-
-/**
- * api_cmd_ctrl_init - set the control register of a chain
- * @chain: the API CMD specific chain to set control register for
- **/
-static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, ctrl;
- u16 cell_size;
-
- /* Read Modify Write */
- addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
-
- cell_size = API_CMD_CELL_SIZE_VAL(chain->cell_size);
-
- ctrl = hinic_hwif_read_reg(hwif, addr);
-
- ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
-
- ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, XOR_ERR) |
- HINIC_API_CMD_CHAIN_CTRL_SET(XOR_CHK_ALL, XOR_CHK_EN) |
- HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE);
-
- hinic_hwif_write_reg(hwif, addr, ctrl);
-}
-
-/**
- * api_cmd_set_status_addr - set the status address of a chain in the HW
- * @chain: the API CMD specific chain to set in HW status address for
- **/
-static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type);
- val = upper_32_bits(chain->wb_status_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type);
- val = lower_32_bits(chain->wb_status_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_set_num_cells - set the number cells of a chain in the HW
- * @chain: the API CMD specific chain to set in HW the number of cells for
- **/
-static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type);
- val = chain->num_cells;
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_head_init - set the head of a chain in the HW
- * @chain: the API CMD specific chain to set in HW the head for
- **/
-static void api_cmd_head_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type);
- val = upper_32_bits(chain->head_cell_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type);
- val = lower_32_bits(chain->head_cell_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_chain_hw_clean - clean the HW
- * @chain: the API CMD specific chain
- **/
-static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, ctrl;
-
- addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
-
- ctrl = hinic_hwif_read_reg(hwif, addr);
- ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
-
- hinic_hwif_write_reg(hwif, addr, ctrl);
-}
-
-/**
- * api_cmd_chain_hw_init - initialize the chain in the HW
- * @chain: the API CMD specific chain to initialize in HW
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- api_cmd_chain_hw_clean(chain);
-
- api_cmd_set_status_addr(chain);
-
- err = api_cmd_hw_restart(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to restart API CMD HW\n");
- return err;
- }
-
- api_cmd_ctrl_init(chain);
- api_cmd_set_num_cells(chain);
- api_cmd_head_init(chain);
- return 0;
-}
-
-/**
- * free_cmd_buf - free the dma buffer of API CMD command
- * @chain: the API CMD specific chain of the cmd
- * @cell_idx: the cell index of the cmd
- **/
-static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- dma_free_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- cell_ctxt->api_cmd_vaddr,
- cell_ctxt->api_cmd_paddr);
-}
-
-/**
- * alloc_cmd_buf - allocate a dma buffer for API CMD command
- * @chain: the API CMD specific chain for the cmd
- * @cell: the cell in the HW for the cmd
- * @cell_idx: the index of the cell
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
- struct hinic_api_cmd_cell *cell, int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- dma_addr_t cmd_paddr;
- u8 *cmd_vaddr;
- int err = 0;
-
- cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- &cmd_paddr, GFP_KERNEL);
- if (!cmd_vaddr)
- return -ENOMEM;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- cell_ctxt->api_cmd_vaddr = cmd_vaddr;
- cell_ctxt->api_cmd_paddr = cmd_paddr;
-
- /* set the cmd DMA address in the cell */
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- /* The data in the HW should be in Big Endian Format */
- cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr);
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- free_cmd_buf(chain, cell_idx);
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-/**
- * api_cmd_create_cell - create API CMD cell for specific chain
- * @chain: the API CMD specific chain to create its cell
- * @cell_idx: the index of the cell to create
- * @pre_node: previous cell
- * @node_vaddr: the returned virt addr of the cell
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
- int cell_idx,
- struct hinic_api_cmd_cell *pre_node,
- struct hinic_api_cmd_cell **node_vaddr)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_cell *node;
- dma_addr_t node_paddr;
- int err;
-
- node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
- GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- node->read.hw_wb_resp_paddr = 0;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
- cell_ctxt->cell_vaddr = node;
- cell_ctxt->cell_paddr = node_paddr;
-
- if (!pre_node) {
- chain->head_cell_paddr = node_paddr;
- chain->head_node = node;
- } else {
- /* The data in the HW should be in Big Endian Format */
- pre_node->next_cell_paddr = cpu_to_be64(node_paddr);
- }
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- err = alloc_cmd_buf(chain, node, cell_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmd buffer\n");
- goto err_alloc_cmd_buf;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- err = -EINVAL;
- goto err_alloc_cmd_buf;
- }
-
- *node_vaddr = node;
- return 0;
-
-err_alloc_cmd_buf:
- dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr);
- return err;
-}
-
-/**
- * api_cmd_destroy_cell - destroy API CMD cell of specific chain
- * @chain: the API CMD specific chain to destroy its cell
- * @cell_idx: the cell to destroy
- **/
-static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain,
- int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_cell *node;
- dma_addr_t node_paddr;
- size_t node_size;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- node = cell_ctxt->cell_vaddr;
- node_paddr = cell_ctxt->cell_paddr;
- node_size = chain->cell_size;
-
- if (cell_ctxt->api_cmd_vaddr) {
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- free_cmd_buf(chain, cell_idx);
- break;
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- break;
- }
-
- dma_free_coherent(&pdev->dev, node_size, node,
- node_paddr);
- }
-}
-
-/**
- * api_cmd_destroy_cells - destroy API CMD cells of specific chain
- * @chain: the API CMD specific chain to destroy its cells
- * @num_cells: number of cells to destroy
- **/
-static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain,
- int num_cells)
-{
- int cell_idx;
-
- for (cell_idx = 0; cell_idx < num_cells; cell_idx++)
- api_cmd_destroy_cell(chain, cell_idx);
-}
-
-/**
- * api_cmd_create_cells - create API CMD cells for specific chain
- * @chain: the API CMD specific chain
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err, cell_idx;
-
- for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) {
- err = api_cmd_create_cell(chain, cell_idx, pre_node, &node);
- if (err) {
- dev_err(&pdev->dev, "Failed to create API CMD cell\n");
- goto err_create_cell;
- }
-
- pre_node = node;
- }
-
- /* set the Final node to point on the start */
- node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);
-
- /* set the current node to be the head */
- chain->curr_node = chain->head_node;
- return 0;
-
-err_create_cell:
- api_cmd_destroy_cells(chain, cell_idx);
- return err;
-}
-
-/**
- * api_chain_init - initialize API CMD specific chain
- * @chain: the API CMD specific chain to initialize
- * @attr: attributes to set in the chain
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_chain_init(struct hinic_api_cmd_chain *chain,
- struct hinic_api_cmd_chain_attr *attr)
-{
- struct hinic_hwif *hwif = attr->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- chain->hwif = hwif;
- chain->chain_type = attr->chain_type;
- chain->num_cells = attr->num_cells;
- chain->cell_size = attr->cell_size;
-
- chain->prod_idx = 0;
- chain->cons_idx = 0;
-
- sema_init(&chain->sem, 1);
-
- chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells,
- sizeof(*chain->cell_ctxt), GFP_KERNEL);
- if (!chain->cell_ctxt)
- return -ENOMEM;
-
- chain->wb_status = dma_alloc_coherent(&pdev->dev,
- sizeof(*chain->wb_status),
- &chain->wb_status_paddr,
- GFP_KERNEL);
- if (!chain->wb_status) {
- dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * api_chain_free - free API CMD specific chain
- * @chain: the API CMD specific chain to free
- **/
-static void api_chain_free(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, sizeof(*chain->wb_status),
- chain->wb_status, chain->wb_status_paddr);
-}
-
-/**
- * api_cmd_create_chain - create API CMD specific chain
- * @attr: attributes to set the chain
- *
- * Return the created chain
- **/
-static struct hinic_api_cmd_chain *
- api_cmd_create_chain(struct hinic_api_cmd_chain_attr *attr)
-{
- struct hinic_hwif *hwif = attr->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_chain *chain;
- int err;
-
- if (attr->num_cells & (attr->num_cells - 1)) {
- dev_err(&pdev->dev, "Invalid number of cells, must be power of 2\n");
- return ERR_PTR(-EINVAL);
- }
-
- chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
- if (!chain)
- return ERR_PTR(-ENOMEM);
-
- err = api_chain_init(chain, attr);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize chain\n");
- return ERR_PTR(err);
- }
-
- err = api_cmd_create_cells(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to create cells for API CMD chain\n");
- goto err_create_cells;
- }
-
- err = api_cmd_chain_hw_init(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize chain HW\n");
- goto err_chain_hw_init;
- }
-
- return chain;
-
-err_chain_hw_init:
- api_cmd_destroy_cells(chain, chain->num_cells);
-
-err_create_cells:
- api_chain_free(chain);
- return ERR_PTR(err);
-}
-
-/**
- * api_cmd_destroy_chain - destroy API CMD specific chain
- * @chain: the API CMD specific chain to destroy
- **/
-static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain)
-{
- api_cmd_chain_hw_clean(chain);
- api_cmd_destroy_cells(chain, chain->num_cells);
- api_chain_free(chain);
-}
-
-/**
- * hinic_api_cmd_init - Initialize all the API CMD chains
- * @chain: the API CMD chains that are initialized
- * @hwif: the hardware interface of a pci function device
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain,
- struct hinic_hwif *hwif)
-{
- enum hinic_api_cmd_chain_type type, chain_type;
- struct hinic_api_cmd_chain_attr attr;
- struct pci_dev *pdev = hwif->pdev;
- size_t hw_cell_sz;
- int err;
-
- hw_cell_sz = sizeof(struct hinic_api_cmd_cell);
-
- attr.hwif = hwif;
- attr.num_cells = API_CHAIN_NUM_CELLS;
- attr.cell_size = API_CMD_CELL_SIZE(hw_cell_sz);
-
- chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
- attr.chain_type = chain_type;
-
- if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- chain[chain_type] = api_cmd_create_chain(&attr);
- if (IS_ERR(chain[chain_type])) {
- dev_err(&pdev->dev, "Failed to create chain %d\n",
- chain_type);
- err = PTR_ERR(chain[chain_type]);
- goto err_create_chain;
- }
- }
-
- return 0;
-
-err_create_chain:
- type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; type < chain_type; type++) {
- if (type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- api_cmd_destroy_chain(chain[type]);
- }
-
- return err;
-}
-
-/**
- * hinic_api_cmd_free - free the API CMD chains
- * @chain: the API CMD chains that are freed
- **/
-void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain)
-{
- enum hinic_api_cmd_chain_type chain_type;
-
- chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
- if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- api_cmd_destroy_chain(chain[chain_type]);
- }
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
deleted file mode 100644
index 6d1654b050ad..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_API_CMD_H
-#define HINIC_HW_API_CMD_H
-
-#include <linux/types.h>
-#include <linux/semaphore.h>
-
-#include "hinic_hw_if.h"
-
-#define HINIC_API_CMD_PI_IDX_SHIFT 0
-
-#define HINIC_API_CMD_PI_IDX_MASK 0xFFFFFF
-
-#define HINIC_API_CMD_PI_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_PI_##member##_MASK) << \
- HINIC_API_CMD_PI_##member##_SHIFT)
-
-#define HINIC_API_CMD_PI_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_PI_##member##_MASK \
- << HINIC_API_CMD_PI_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
-
-#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1
-
-#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \
- HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)
-
-#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \
- (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
- HINIC_API_CMD_CHAIN_REQ_##member##_MASK)
-
-#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \
- << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_SHIFT 1
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
-#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
-#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
-#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
-
-#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3
-#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3
-
-#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
- HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
-
-#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \
- << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_SHIFT 0
-#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_SHIFT 16
-#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_SHIFT 24
-#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
-
-#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFF
-
-#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \
- ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \
- HINIC_API_CMD_CELL_CTRL_##member##_SHIFT)
-
-#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0
-#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1
-#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
-#define HINIC_API_CMD_DESC_DEST_SHIFT 32
-#define HINIC_API_CMD_DESC_SIZE_SHIFT 40
-#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
-
-#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1
-#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1
-#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1
-#define HINIC_API_CMD_DESC_DEST_MASK 0x1F
-#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FF
-#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFF
-
-#define HINIC_API_CMD_DESC_SET(val, member) \
- ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \
- HINIC_API_CMD_DESC_##member##_SHIFT)
-
-#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
-
-#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFF
-
-#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \
- (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
- HINIC_API_CMD_STATUS_HEADER_##member##_MASK)
-
-#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0
-#define HINIC_API_CMD_STATUS_FSM_SHIFT 24
-#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
-#define HINIC_API_CMD_STATUS_CPLD_ERR_SHIFT 30
-
-#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF
-#define HINIC_API_CMD_STATUS_FSM_MASK 0xFU
-#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3
-#define HINIC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U
-
-#define HINIC_API_CMD_STATUS_GET(val, member) \
- (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \
- HINIC_API_CMD_STATUS_##member##_MASK)
-
-enum hinic_api_cmd_chain_type {
- HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2,
-
- HINIC_API_CMD_MAX,
-};
-
-struct hinic_api_cmd_chain_attr {
- struct hinic_hwif *hwif;
- enum hinic_api_cmd_chain_type chain_type;
-
- u32 num_cells;
- u16 cell_size;
-};
-
-struct hinic_api_cmd_status {
- u64 header;
- u32 status;
- u32 rsvd0;
- u32 rsvd1;
- u32 rsvd2;
- u64 rsvd3;
-};
-
-/* HW struct */
-struct hinic_api_cmd_cell {
- u64 ctrl;
-
- /* address is 64 bit in HW struct */
- u64 next_cell_paddr;
-
- u64 desc;
-
- /* HW struct */
- union {
- struct {
- u64 hw_cmd_paddr;
- } write;
-
- struct {
- u64 hw_wb_resp_paddr;
- u64 hw_cmd_paddr;
- } read;
- };
-};
-
-struct hinic_api_cmd_cell_ctxt {
- dma_addr_t cell_paddr;
- struct hinic_api_cmd_cell *cell_vaddr;
-
- dma_addr_t api_cmd_paddr;
- u8 *api_cmd_vaddr;
-};
-
-struct hinic_api_cmd_chain {
- struct hinic_hwif *hwif;
- enum hinic_api_cmd_chain_type chain_type;
-
- u32 num_cells;
- u16 cell_size;
-
- /* HW members in 24 bit format */
- u32 prod_idx;
- u32 cons_idx;
-
- struct semaphore sem;
-
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
-
- dma_addr_t wb_status_paddr;
- struct hinic_api_cmd_status *wb_status;
-
- dma_addr_t head_cell_paddr;
- struct hinic_api_cmd_cell *head_node;
- struct hinic_api_cmd_cell *curr_node;
-};
-
-int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 size);
-
-int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain,
- struct hinic_hwif *hwif);
-
-void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
deleted file mode 100644
index d39eec9c62bf..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ /dev/null
@@ -1,963 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/spinlock.h>
-#include <linux/sizes.h>
-#include <linux/atomic.h>
-#include <linux/log2.h>
-#include <linux/io.h>
-#include <linux/completion.h>
-#include <linux/err.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_io.h"
-#include "hinic_hw_dev.h"
-
-#define CMDQ_CEQE_TYPE_SHIFT 0
-
-#define CMDQ_CEQE_TYPE_MASK 0x7
-
-#define CMDQ_CEQE_GET(val, member) \
- (((val) >> CMDQ_CEQE_##member##_SHIFT) \
- & CMDQ_CEQE_##member##_MASK)
-
-#define CMDQ_WQE_ERRCODE_VAL_SHIFT 20
-
-#define CMDQ_WQE_ERRCODE_VAL_MASK 0xF
-
-#define CMDQ_WQE_ERRCODE_GET(val, member) \
- (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
- & CMDQ_WQE_ERRCODE_##member##_MASK)
-
-#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
-
-#define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi))
-
-#define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
-
-#define CMDQ_WQE_COMPLETED(ctrl_info) \
- HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
-
-#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
-
-#define CMDQ_DB_OFF SZ_2K
-
-#define CMDQ_WQEBB_SIZE 64
-#define CMDQ_WQE_SIZE 64
-#define CMDQ_DEPTH SZ_4K
-
-#define CMDQ_WQ_PAGE_SIZE SZ_256K
-
-#define WQE_LCMD_SIZE 64
-#define WQE_SCMD_SIZE 64
-
-#define COMPLETE_LEN 3
-
-#define CMDQ_TIMEOUT 1000
-
-#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
-
-#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
- struct hinic_cmdqs, cmdq[0])
-
-#define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \
- struct hinic_func_to_io, \
- cmdqs)
-
-enum completion_format {
- COMPLETE_DIRECT = 0,
- COMPLETE_SGE = 1,
-};
-
-enum data_format {
- DATA_SGE = 0,
- DATA_DIRECT = 1,
-};
-
-enum bufdesc_len {
- BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */
- BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */
-};
-
-enum ctrl_sect_len {
- CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */
- CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */
-};
-
-enum cmdq_scmd_type {
- CMDQ_SET_ARM_CMD = 2,
-};
-
-enum cmdq_cmd_type {
- CMDQ_CMD_SYNC_DIRECT_RESP = 0,
- CMDQ_CMD_SYNC_SGE_RESP = 1,
-};
-
-enum completion_request {
- NO_CEQ = 0,
- CEQ_SET = 1,
-};
-
-/**
- * hinic_alloc_cmdq_buf - alloc buffer for sending command
- * @cmdqs: the cmdqs
- * @cmdq_buf: the buffer returned in this struct
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf)
-{
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
- &cmdq_buf->dma_addr);
- if (!cmdq_buf->buf) {
- dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * hinic_free_cmdq_buf - free buffer
- * @cmdqs: the cmdqs
- * @cmdq_buf: the buffer to free that is in this struct
- **/
-void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf)
-{
- dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
-}
-
-static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
-{
- unsigned int wqe_size = 0;
-
- switch (len) {
- case BUFDESC_LCMD_LEN:
- wqe_size = WQE_LCMD_SIZE;
- break;
- case BUFDESC_SCMD_LEN:
- wqe_size = WQE_SCMD_SIZE;
- break;
- }
-
- return wqe_size;
-}
-
-static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
- struct hinic_cmdq_buf *buf_out)
-{
- struct hinic_sge_resp *sge_resp = &completion->sge_resp;
-
- hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
-}
-
-static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
- enum completion_format complete_format,
- enum data_format data_format,
- enum bufdesc_len buf_len)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
- enum ctrl_sect_len ctrl_len;
- struct hinic_ctrl *ctrl;
- u32 saved_data;
-
- if (data_format == DATA_SGE) {
- wqe_lcmd = &wqe->wqe_lcmd;
-
- wqe_lcmd->status.status_info = 0;
- ctrl = &wqe_lcmd->ctrl;
- ctrl_len = CTRL_SECT_LEN;
- } else {
- wqe_scmd = &wqe->direct_wqe.wqe_scmd;
-
- wqe_scmd->status.status_info = 0;
- ctrl = &wqe_scmd->ctrl;
- ctrl_len = CTRL_DIRECT_SECT_LEN;
- }
-
- ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) |
- HINIC_CMDQ_CTRL_SET(cmd, CMD) |
- HINIC_CMDQ_CTRL_SET(mod, MOD) |
- HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE);
-
- CMDQ_WQE_HEADER(wqe)->header_info =
- HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
- HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
- HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
- HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED);
-
- saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
- saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);
-
- if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM)
- CMDQ_WQE_HEADER(wqe)->saved_data |=
- HINIC_SAVED_DATA_SET(1, ARM);
- else
- CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;
-}
-
-static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
- struct hinic_cmdq_buf *buf_in)
-{
- hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
-}
-
-static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
- void *buf_in, u32 in_size)
-{
- struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;
-
- wqe_scmd->buf_desc.buf_len = in_size;
- memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
-}
-
-static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
- enum cmdq_cmd_type cmd_type,
- struct hinic_cmdq_buf *buf_in,
- struct hinic_cmdq_buf *buf_out, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
- enum completion_format complete_format;
-
- switch (cmd_type) {
- case CMDQ_CMD_SYNC_SGE_RESP:
- complete_format = COMPLETE_SGE;
- cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
- break;
- case CMDQ_CMD_SYNC_DIRECT_RESP:
- complete_format = COMPLETE_DIRECT;
- wqe_lcmd->completion.direct_resp = 0;
- break;
- }
-
- cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
- prod_idx, complete_format, DATA_SGE,
- BUFDESC_LCMD_LEN);
-
- cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
-}
-
-static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
- enum cmdq_cmd_type cmd_type,
- void *buf_in, u16 in_size,
- struct hinic_cmdq_buf *buf_out, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
-{
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- enum completion_format complete_format;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
-
- switch (cmd_type) {
- case CMDQ_CMD_SYNC_SGE_RESP:
- complete_format = COMPLETE_SGE;
- cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
- break;
- case CMDQ_CMD_SYNC_DIRECT_RESP:
- complete_format = COMPLETE_DIRECT;
- wqe_scmd->completion.direct_resp = 0;
- break;
- }
-
- cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
- complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);
-
- cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
-}
-
-static void cmdq_wqe_fill(void *dst, void *src)
-{
- memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
- CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
-
- wmb(); /* The first 8 bytes should be written last */
-
- *(u64 *)dst = *(u64 *)src;
-}
-
-static void cmdq_fill_db(u32 *db_info,
- enum hinic_cmdq_type cmdq_type, u16 prod_idx)
-{
- *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
- HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) |
- HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
- HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
-}
-
-static void cmdq_set_db(struct hinic_cmdq *cmdq,
- enum hinic_cmdq_type cmdq_type, u16 prod_idx)
-{
- u32 db_info;
-
- cmdq_fill_db(&db_info, cmdq_type, prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- db_info = cpu_to_be32(db_info);
-
- wmb(); /* write all before the doorbell */
-
- writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
-}
-
-static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in,
- u64 *resp)
-{
- struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
- u16 curr_prod_idx, next_prod_idx;
- int errcode, wrapped, num_wqebbs;
- struct hinic_wq *wq = cmdq->wq;
- struct hinic_hw_wqe *hw_wqe;
- struct completion done;
-
- /* Keep doorbell index correct. bh - for tasklet(ceq). */
- spin_lock_bh(&cmdq->cmdq_lock);
-
- /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
- hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
- if (IS_ERR(hw_wqe)) {
- spin_unlock_bh(&cmdq->cmdq_lock);
- return -EBUSY;
- }
-
- curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
-
- wrapped = cmdq->wrapped;
-
- num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
- next_prod_idx = curr_prod_idx + num_wqebbs;
- if (next_prod_idx >= wq->q_depth) {
- cmdq->wrapped = !cmdq->wrapped;
- next_prod_idx -= wq->q_depth;
- }
-
- cmdq->errcode[curr_prod_idx] = &errcode;
-
- init_completion(&done);
- cmdq->done[curr_prod_idx] = &done;
-
- cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
- wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
- curr_prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);
-
- /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
- cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
-
- cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
-
- spin_unlock_bh(&cmdq->cmdq_lock);
-
- if (!wait_for_completion_timeout(&done,
- msecs_to_jiffies(CMDQ_TIMEOUT))) {
- spin_lock_bh(&cmdq->cmdq_lock);
-
- if (cmdq->errcode[curr_prod_idx] == &errcode)
- cmdq->errcode[curr_prod_idx] = NULL;
-
- if (cmdq->done[curr_prod_idx] == &done)
- cmdq->done[curr_prod_idx] = NULL;
-
- spin_unlock_bh(&cmdq->cmdq_lock);
-
- hinic_dump_ceq_info(cmdq->hwdev);
- return -ETIMEDOUT;
- }
-
- smp_rmb(); /* read error code after completion */
-
- if (resp) {
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;
-
- *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
- }
-
- if (errcode != 0)
- return -EFAULT;
-
- return 0;
-}
-
-static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
- u16 in_size)
-{
- struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
- u16 curr_prod_idx, next_prod_idx;
- struct hinic_wq *wq = cmdq->wq;
- struct hinic_hw_wqe *hw_wqe;
- int wrapped, num_wqebbs;
-
- /* Keep doorbell index correct */
- spin_lock(&cmdq->cmdq_lock);
-
- /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
- hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
- if (IS_ERR(hw_wqe)) {
- spin_unlock(&cmdq->cmdq_lock);
- return -EBUSY;
- }
-
- curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
-
- wrapped = cmdq->wrapped;
-
- num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
- next_prod_idx = curr_prod_idx + num_wqebbs;
- if (next_prod_idx >= wq->q_depth) {
- cmdq->wrapped = !cmdq->wrapped;
- next_prod_idx -= wq->q_depth;
- }
-
- cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
- in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
- HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);
-
- /* cmdq wqe is not shadow, therefore wqe will be written to wq */
- cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
-
- cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
-
- spin_unlock(&cmdq->cmdq_lock);
- return 0;
-}
-
-static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
-{
- if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * hinic_cmdq_direct_resp - send command with direct data as resp
- * @cmdqs: the cmdqs
- * @mod: module on the card that will handle the command
- * @cmd: the command
- * @buf_in: the buffer for the command
- * @resp: the response to return
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in, u64 *resp)
-{
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = cmdq_params_valid(buf_in);
- if (err) {
- dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
- return err;
- }
-
- return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
- mod, cmd, buf_in, resp);
-}
-
-/**
- * hinic_set_arm_bit - set arm bit for enable interrupt again
- * @cmdqs: the cmdqs
- * @q_type: type of queue to set the arm bit for
- * @q_id: the queue number
- *
- * Return 0 - Success, negative - Failure
- **/
-static int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id)
-{
- struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_arm_bit arm_bit;
- int err;
-
- arm_bit.q_type = q_type;
- arm_bit.q_id = q_id;
-
- err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
- if (err) {
- dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
- return err;
- }
-
- return 0;
-}
-
-static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
- struct hinic_cmdq_wqe *wqe)
-{
- u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
- unsigned int bufdesc_len, wqe_size;
- struct hinic_ctrl *ctrl;
-
- bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
- wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
- if (wqe_size == WQE_LCMD_SIZE) {
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
-
- ctrl = &wqe_lcmd->ctrl;
- } else {
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
- ctrl = &wqe_scmd->ctrl;
- }
-
- /* clear HW busy bit */
- ctrl->ctrl_info = 0;
-
- wmb(); /* verify wqe is clear */
-}
-
-/**
- * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
- * @cmdq: the cmdq of the arm command
- * @wqe: the wqe of the arm command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
- struct hinic_cmdq_wqe *wqe)
-{
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
- struct hinic_ctrl *ctrl;
- u32 ctrl_info;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
- ctrl = &wqe_scmd->ctrl;
- ctrl_info = be32_to_cpu(ctrl->ctrl_info);
-
- /* HW should toggle the HW BUSY BIT */
- if (!CMDQ_WQE_COMPLETED(ctrl_info))
- return -EBUSY;
-
- clear_wqe_complete_bit(cmdq, wqe);
-
- hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
- return 0;
-}
-
-static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
- int errcode)
-{
- if (cmdq->errcode[prod_idx])
- *cmdq->errcode[prod_idx] = errcode;
-}
-
-/**
- * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
- * @cmdq: the cmdq of the command
- * @cons_idx: the consumer index to update the error code for
- * @errcode: the error code
- **/
-static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
- int errcode)
-{
- u16 prod_idx = cons_idx;
-
- spin_lock(&cmdq->cmdq_lock);
- cmdq_update_errcode(cmdq, prod_idx, errcode);
-
- wmb(); /* write all before update for the command request */
-
- if (cmdq->done[prod_idx])
- complete(cmdq->done[prod_idx]);
- spin_unlock(&cmdq->cmdq_lock);
-}
-
-static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
- struct hinic_cmdq_wqe *cmdq_wqe)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
- struct hinic_status *status = &wqe_lcmd->status;
- struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
- int errcode;
-
- if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
- return -EBUSY;
-
- dma_rmb();
-
- errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
-
- cmdq_sync_cmd_handler(cmdq, ci, errcode);
-
- clear_wqe_complete_bit(cmdq, cmdq_wqe);
- hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
- return 0;
-}
-
-/**
- * cmdq_ceq_handler - cmdq completion event handler
- * @handle: private data for the handler(cmdqs)
- * @ceqe_data: ceq element data
- **/
-static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
-{
- enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
- struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
- struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
- struct hinic_cmdq_header *header;
- struct hinic_hw_wqe *hw_wqe;
- int err, set_arm = 0;
- u32 saved_data;
- u16 ci;
-
- /* Read the smallest wqe size for getting wqe size */
- while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
- if (IS_ERR(hw_wqe))
- break;
-
- header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
- saved_data = be32_to_cpu(header->saved_data);
-
- if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
- /* arm_bit was set until here */
- set_arm = 0;
-
- if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
- break;
- } else {
- set_arm = 1;
-
- hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
- if (IS_ERR(hw_wqe))
- break;
-
- if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
- break;
- }
- }
-
- if (set_arm) {
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
- if (err)
- dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
- }
-}
-
-/**
- * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
- * @cmdq_ctxt: cmdq ctxt to initialize
- * @cmdq: the cmdq
- * @cmdq_pages: the memory of the queue
- **/
-static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
- struct hinic_cmdq *cmdq,
- struct hinic_cmdq_pages *cmdq_pages)
-{
- struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
- u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
- struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
- struct hinic_wq *wq = cmdq->wq;
-
- /* The data in the HW is in Big Endian Format */
- wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
-
- pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K);
-
- ctxt_info->curr_wqe_page_pfn =
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
-
- if (wq->num_q_pages != 1) {
- /* block PFN - Read Modify Write */
- cmdq_first_block_paddr = cmdq_pages->page_paddr;
-
- pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
- }
-
- ctxt_info->wq_block_pfn =
- HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
- HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
-
- cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
- cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(cmdqs->hwif);
- cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
-}
-
-/**
- * init_cmdq - initialize cmdq
- * @cmdq: the cmdq
- * @wq: the wq attaced to the cmdq
- * @q_type: the cmdq type of the cmdq
- * @db_area: doorbell area for the cmdq
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
- enum hinic_cmdq_type q_type, void __iomem *db_area)
-{
- int err;
-
- cmdq->wq = wq;
- cmdq->cmdq_type = q_type;
- cmdq->wrapped = 1;
-
- spin_lock_init(&cmdq->cmdq_lock);
-
- cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
- if (!cmdq->done)
- return -ENOMEM;
-
- cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
- wq->q_depth));
- if (!cmdq->errcode) {
- err = -ENOMEM;
- goto err_errcode;
- }
-
- cmdq->db_base = db_area + CMDQ_DB_OFF;
- return 0;
-
-err_errcode:
- vfree(cmdq->done);
- return err;
-}
-
-/**
- * free_cmdq - Free cmdq
- * @cmdq: the cmdq to free
- **/
-static void free_cmdq(struct hinic_cmdq *cmdq)
-{
- vfree(cmdq->errcode);
- vfree(cmdq->done);
-}
-
-/**
- * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
- * @hwdev: the NIC HW device
- * @cmdqs: cmdqs to write the ctxts for
- * @db_area: db_area for all the cmdqs
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
- struct hinic_cmdqs *cmdqs, void __iomem **db_area)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- enum hinic_cmdq_type type, cmdq_type;
- struct hinic_cmdq_ctxt *cmdq_ctxts;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
- sizeof(*cmdq_ctxts), GFP_KERNEL);
- if (!cmdq_ctxts)
- return -ENOMEM;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
- cmdqs->cmdq[cmdq_type].hwdev = hwdev;
- err = init_cmdq(&cmdqs->cmdq[cmdq_type],
- &cmdqs->saved_wqs[cmdq_type], cmdq_type,
- db_area[cmdq_type]);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmdq\n");
- goto err_init_cmdq;
- }
-
- cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
- &cmdqs->cmdq[cmdq_type],
- &cmdqs->cmdq_pages);
- }
-
- /* Write the CMDQ ctxts */
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_CMDQ_CTXT_SET,
- &cmdq_ctxts[cmdq_type],
- sizeof(cmdq_ctxts[cmdq_type]),
- NULL, NULL, HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
- cmdq_type);
- goto err_write_cmdq_ctxt;
- }
- }
-
- devm_kfree(&pdev->dev, cmdq_ctxts);
- return 0;
-
-err_write_cmdq_ctxt:
- cmdq_type = HINIC_MAX_CMDQ_TYPES;
-
-err_init_cmdq:
- for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
- free_cmdq(&cmdqs->cmdq[type]);
-
- devm_kfree(&pdev->dev, cmdq_ctxts);
- return err;
-}
-
-static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
-{
- struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 };
- struct hinic_pfhwdev *pfhwdev;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
-
- hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE;
- hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_HWCTXT_SET,
- &hw_ioctxt, sizeof(hw_ioctxt), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
-
-/**
- * hinic_init_cmdqs - init all cmdqs
- * @cmdqs: cmdqs to init
- * @hwif: HW interface for accessing cmdqs
- * @db_area: doorbell areas for all the cmdqs
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
- void __iomem **db_area)
-{
- struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_hwdev *hwdev;
- u16 max_wqe_size;
- int err;
-
- cmdqs->hwif = hwif;
- cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
- HINIC_CMDQ_BUF_SIZE,
- HINIC_CMDQ_BUF_SIZE, 0);
- if (!cmdqs->cmdq_buf_pool)
- return -ENOMEM;
-
- cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
- sizeof(*cmdqs->saved_wqs), GFP_KERNEL);
- if (!cmdqs->saved_wqs) {
- err = -ENOMEM;
- goto err_saved_wqs;
- }
-
- max_wqe_size = WQE_LCMD_SIZE;
- err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
- HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
- CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
- goto err_cmdq_wqs;
- }
-
- hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
- err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
- if (err) {
- dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
- goto err_cmdq_ctxt;
- }
-
- hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
- cmdq_ceq_handler);
-
- err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH);
- if (err) {
- dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n");
- goto err_set_cmdq_depth;
- }
-
- return 0;
-
-err_set_cmdq_depth:
- hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
- free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]);
-err_cmdq_ctxt:
- hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
- HINIC_MAX_CMDQ_TYPES);
-
-err_cmdq_wqs:
- devm_kfree(&pdev->dev, cmdqs->saved_wqs);
-
-err_saved_wqs:
- dma_pool_destroy(cmdqs->cmdq_buf_pool);
- return err;
-}
-
-/**
- * hinic_free_cmdqs - free all cmdqs
- * @cmdqs: cmdqs to free
- **/
-void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
-{
- struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_cmdq_type cmdq_type;
-
- hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
-
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
- free_cmdq(&cmdqs->cmdq[cmdq_type]);
-
- hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
- HINIC_MAX_CMDQ_TYPES);
-
- devm_kfree(&pdev->dev, cmdqs->saved_wqs);
-
- dma_pool_destroy(cmdqs->cmdq_buf_pool);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
deleted file mode 100644
index ff09cf0ed52b..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_CMDQ_H
-#define HINIC_CMDQ_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/pci.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wq.h"
-
-#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
-#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT 56
-#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT 61
-#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT 62
-#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT 63
-
-#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
-#define HINIC_CMDQ_CTXT_EQ_ID_MASK 0x1F
-#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK 0x1
-#define HINIC_CMDQ_CTXT_CEQ_EN_MASK 0x1
-#define HINIC_CMDQ_CTXT_WRAPPED_MASK 0x1
-
-#define HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member) \
- (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTXT_PAGE_INFO_GET(val, member) \
- (((u64)(val) >> HINIC_CMDQ_CTXT_##member##_SHIFT) \
- & HINIC_CMDQ_CTXT_##member##_MASK)
-
-#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \
- ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)))
-
-#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
-#define HINIC_CMDQ_CTXT_CI_SHIFT 52
-
-#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
-#define HINIC_CMDQ_CTXT_CI_MASK 0xFFF
-
-#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
- (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTXT_BLOCK_INFO_GET(val, member) \
- (((u64)(val) >> HINIC_CMDQ_CTXT_##member##_SHIFT) \
- & HINIC_CMDQ_CTXT_##member##_MASK)
-
-#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \
- ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)))
-
-#define HINIC_SAVED_DATA_ARM_SHIFT 31
-
-#define HINIC_SAVED_DATA_ARM_MASK 0x1
-
-#define HINIC_SAVED_DATA_SET(val, member) \
- (((u32)(val) & HINIC_SAVED_DATA_##member##_MASK) \
- << HINIC_SAVED_DATA_##member##_SHIFT)
-
-#define HINIC_SAVED_DATA_GET(val, member) \
- (((val) >> HINIC_SAVED_DATA_##member##_SHIFT) \
- & HINIC_SAVED_DATA_##member##_MASK)
-
-#define HINIC_SAVED_DATA_CLEAR(val, member) \
- ((val) & (~(HINIC_SAVED_DATA_##member##_MASK \
- << HINIC_SAVED_DATA_##member##_SHIFT)))
-
-#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
-#define HINIC_CMDQ_DB_INFO_PATH_SHIFT 23
-#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
-#define HINIC_CMDQ_DB_INFO_DB_TYPE_SHIFT 27
-
-#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFF
-#define HINIC_CMDQ_DB_INFO_PATH_MASK 0x1
-#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7
-#define HINIC_CMDQ_DB_INFO_DB_TYPE_MASK 0x1F
-
-#define HINIC_CMDQ_DB_INFO_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_DB_INFO_##member##_MASK) \
- << HINIC_CMDQ_DB_INFO_##member##_SHIFT)
-
-#define HINIC_CMDQ_BUF_SIZE 2048
-
-#define HINIC_CMDQ_BUF_HW_RSVD 8
-#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE - \
- HINIC_CMDQ_BUF_HW_RSVD)
-
-enum hinic_cmdq_type {
- HINIC_CMDQ_SYNC,
-
- HINIC_MAX_CMDQ_TYPES,
-};
-
-enum hinic_set_arm_qtype {
- HINIC_SET_ARM_CMDQ,
-};
-
-enum hinic_cmd_ack_type {
- HINIC_CMD_ACK_TYPE_CMDQ,
-};
-
-struct hinic_cmdq_buf {
- void *buf;
- dma_addr_t dma_addr;
- size_t size;
-};
-
-struct hinic_cmdq_arm_bit {
- u32 q_type;
- u32 q_id;
-};
-
-struct hinic_cmdq_ctxt_info {
- u64 curr_wqe_page_pfn;
- u64 wq_block_pfn;
-};
-
-struct hinic_cmdq_ctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 cmdq_type;
- u8 ppf_idx;
-
- u8 rsvd2[4];
-
- struct hinic_cmdq_ctxt_info ctxt_info;
-};
-
-struct hinic_cmdq {
- struct hinic_hwdev *hwdev;
-
- struct hinic_wq *wq;
-
- enum hinic_cmdq_type cmdq_type;
- int wrapped;
-
- /* Lock for keeping the doorbell order */
- spinlock_t cmdq_lock;
-
- struct completion **done;
- int **errcode;
-
- /* doorbell area */
- void __iomem *db_base;
-};
-
-struct hinic_cmdqs {
- struct hinic_hwif *hwif;
-
- struct dma_pool *cmdq_buf_pool;
-
- struct hinic_wq *saved_wqs;
-
- struct hinic_cmdq_pages cmdq_pages;
-
- struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES];
-};
-
-int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf);
-
-void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf);
-
-int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in, u64 *out_param);
-
-int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
- void __iomem **db_area);
-
-void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
deleted file mode 100644
index d56e7413ace0..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_CSR_H
-#define HINIC_HW_CSR_H
-
-/* HW interface registers */
-#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
-#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
-#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8
-#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
-#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
-
-#define HINIC_DMA_ATTR_BASE 0xC80
-#define HINIC_ELECTION_BASE 0x4200
-
-#define HINIC_DMA_ATTR_STRIDE 0x4
-#define HINIC_CSR_DMA_ATTR_ADDR(idx) \
- (HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE)
-
-#define HINIC_PPF_ELECTION_STRIDE 0x4
-
-#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \
- (HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE)
-
-/* API CMD registers */
-#define HINIC_CSR_API_CMD_BASE 0xF000
-
-#define HINIC_CSR_API_CMD_STRIDE 0x100
-
-#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-/* MSI-X registers */
-#define HINIC_CSR_MSIX_CTRL_BASE 0x2000
-#define HINIC_CSR_MSIX_CNT_BASE 0x2004
-
-#define HINIC_CSR_MSIX_STRIDE 0x8
-
-#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \
- (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
-
-#define HINIC_CSR_MSIX_CNT_ADDR(idx) \
- (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
-
-/* EQ registers */
-#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200
-#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400
-
-#define HINIC_EQ_MTT_OFF_STRIDE 0x40
-
-#define HINIC_CSR_AEQ_MTT_OFF(id) \
- (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_MTT_OFF(id) \
- (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
-
-#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8
-
-#define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
-
-#define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
-
-#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00
-#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04
-#define HINIC_AEQ_CONS_IDX_ADDR_BASE 0xE08
-#define HINIC_AEQ_PROD_IDX_ADDR_BASE 0xE0C
-
-#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000
-#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004
-#define HINIC_CEQ_CONS_IDX_ADDR_BASE 0x1008
-#define HINIC_CEQ_PROD_IDX_ADDR_BASE 0x100C
-
-#define HINIC_EQ_OFF_STRIDE 0x80
-
-#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \
- (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \
- (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \
- (HINIC_AEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \
- (HINIC_AEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \
- (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \
- (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \
- (HINIC_CEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \
- (HINIC_CEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
deleted file mode 100644
index 27795288c586..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ /dev/null
@@ -1,1197 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/log2.h>
-#include <linux/err.h>
-#include <linux/netdevice.h>
-#include <net/devlink.h>
-
-#include "hinic_devlink.h"
-#include "hinic_sriov.h"
-#include "hinic_dev.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-#include "hinic_hw_dev.h"
-
-#define OUTBOUND_STATE_TIMEOUT 100
-#define DB_STATE_TIMEOUT 100
-
-#define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \
- (2 * (max_qps) + (num_aeqs) + (num_ceqs))
-
-#define ADDR_IN_4BYTES(addr) ((addr) >> 2)
-
-enum intr_type {
- INTR_MSIX_TYPE,
-};
-
-/**
- * parse_capability - convert device capabilities to NIC capabilities
- * @hwdev: the HW device to set and convert device capabilities for
- * @dev_cap: device capabilities from FW
- *
- * Return 0 - Success, negative - Failure
- **/
-static int parse_capability(struct hinic_hwdev *hwdev,
- struct hinic_dev_cap *dev_cap)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
- int num_aeqs, num_ceqs, num_irqs;
-
- if (!HINIC_IS_VF(hwdev->hwif) && dev_cap->intr_type != INTR_MSIX_TYPE)
- return -EFAULT;
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif);
- num_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif);
-
- /* Each QP has its own (SQ + RQ) interrupts */
- nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2;
-
- if (nic_cap->num_qps > HINIC_Q_CTXT_MAX)
- nic_cap->num_qps = HINIC_Q_CTXT_MAX;
-
- if (!HINIC_IS_VF(hwdev->hwif))
- nic_cap->max_qps = dev_cap->max_sqs + 1;
- else
- nic_cap->max_qps = dev_cap->max_sqs;
-
- if (nic_cap->num_qps > nic_cap->max_qps)
- nic_cap->num_qps = nic_cap->max_qps;
-
- if (!HINIC_IS_VF(hwdev->hwif)) {
- nic_cap->max_vf = dev_cap->max_vf;
- nic_cap->max_vf_qps = dev_cap->max_vf_sqs + 1;
- }
-
- hwdev->port_id = dev_cap->port_id;
-
- return 0;
-}
-
-/**
- * get_capability - get device capabilities from FW
- * @pfhwdev: the PF HW device to get capabilities for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_capability(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_dev_cap dev_cap;
- u16 out_len;
- int err;
-
- out_len = sizeof(dev_cap);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM,
- HINIC_CFG_NIC_CAP, &dev_cap, sizeof(dev_cap),
- &dev_cap, &out_len, HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to get capability from FW\n");
- return err;
- }
-
- return parse_capability(hwdev, &dev_cap);
-}
-
-/**
- * get_dev_cap - get device capabilities
- * @hwdev: the NIC HW device to get capabilities for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_dev_cap(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- switch (HINIC_FUNC_TYPE(hwif)) {
- case HINIC_PPF:
- case HINIC_PF:
- case HINIC_VF:
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
- err = get_capability(pfhwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to get capability\n");
- return err;
- }
- break;
- default:
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * init_msix - enable the msix and save the entries
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_msix(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int nr_irqs, num_aeqs, num_ceqs;
- int i, err;
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
- nr_irqs = MAX_IRQS(HINIC_MAX_QPS, num_aeqs, num_ceqs);
- if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
- nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
-
- hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs,
- sizeof(*hwdev->msix_entries),
- GFP_KERNEL);
- if (!hwdev->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < nr_irqs; i++)
- hwdev->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, hwdev->msix_entries, nr_irqs);
- if (err) {
- dev_err(&pdev->dev, "Failed to enable pci msix\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * disable_msix - disable the msix
- * @hwdev: the NIC HW device
- **/
-static void disable_msix(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- pci_disable_msix(pdev);
-}
-
-/**
- * hinic_port_msg_cmd - send port msg to mgmt
- * @hwdev: the NIC HW device
- * @cmd: the port command
- * @buf_in: input buffer
- * @in_size: input size
- * @buf_out: output buffer
- * @out_size: returned output size
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
- void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
-{
- struct hinic_pfhwdev *pfhwdev;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd,
- buf_in, in_size, buf_out, out_size,
- HINIC_MGMT_MSG_SYNC);
-}
-
-int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
- void *buf_in, u16 in_size, void *buf_out,
- u16 *out_size)
-{
- struct hinic_pfhwdev *pfhwdev;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_HILINK, cmd,
- buf_in, in_size, buf_out, out_size,
- HINIC_MGMT_MSG_SYNC);
-}
-
-/**
- * init_fw_ctxt- Init Firmware tables before network mgmt and io operations
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_fw_ctxt(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmd_fw_ctxt fw_ctxt;
- u16 out_size = sizeof(fw_ctxt);
- int err;
-
- fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
- &fw_ctxt, sizeof(fw_ctxt),
- &fw_ctxt, &out_size);
- if (err || out_size != sizeof(fw_ctxt) || fw_ctxt.status) {
- dev_err(&pdev->dev, "Failed to init FW ctxt, err: %d, status: 0x%x, out size: 0x%x\n",
- err, fw_ctxt.status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * set_hw_ioctxt - set the shape of the IO queues in FW
- * @hwdev: the NIC HW device
- * @rq_depth: rq depth
- * @sq_depth: sq depth
- *
- * Return 0 - Success, negative - Failure
- **/
-static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int sq_depth,
- unsigned int rq_depth)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_cmd_hw_ioctxt hw_ioctxt;
- struct hinic_pfhwdev *pfhwdev;
-
- hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
-
- hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
- hw_ioctxt.cmdq_depth = 0;
-
- hw_ioctxt.lro_en = 1;
-
- hw_ioctxt.rq_depth = ilog2(rq_depth);
-
- hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX;
-
- hw_ioctxt.sq_depth = ilog2(sq_depth);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_HWCTXT_SET,
- &hw_ioctxt, sizeof(hw_ioctxt), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
-
-static int wait_for_outbound_state(struct hinic_hwdev *hwdev)
-{
- enum hinic_outbound_state outbound_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(OUTBOUND_STATE_TIMEOUT);
- do {
- outbound_state = hinic_outbound_state_get(hwif);
-
- if (outbound_state == HINIC_OUTBOUND_ENABLE)
- return 0;
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for OUTBOUND - Timeout\n");
- return -EFAULT;
-}
-
-static int wait_for_db_state(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_db_state db_state;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(DB_STATE_TIMEOUT);
- do {
- db_state = hinic_db_state_get(hwif);
-
- if (db_state == HINIC_DB_ENABLE)
- return 0;
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for DB - Timeout\n");
- return -EFAULT;
-}
-
-/**
- * clear_io_resources - set the IO resources as not active in the NIC
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int clear_io_resources(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_clear_io_res cmd_clear_io_res;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- /* sleep 100ms to wait for firmware stopping I/O */
- msleep(100);
-
- cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_IO_RES_CLEAR, &cmd_clear_io_res,
- sizeof(cmd_clear_io_res), NULL, NULL,
- HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to clear IO resources\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * set_resources_state - set the state of the resources in the NIC
- * @hwdev: the NIC HW device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-static int set_resources_state(struct hinic_hwdev *hwdev,
- enum hinic_res_state state)
-{
- struct hinic_cmd_set_res_state res_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_pfhwdev *pfhwdev;
-
- res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- res_state.state = state;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_COMM,
- HINIC_COMM_CMD_RES_STATE_SET,
- &res_state, sizeof(res_state), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
-
-/**
- * get_base_qpn - get the first qp number
- * @hwdev: the NIC HW device
- * @base_qpn: returned qp number
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
-{
- struct hinic_cmd_base_qpn cmd_base_qpn;
- struct hinic_hwif *hwif = hwdev->hwif;
- u16 out_size = sizeof(cmd_base_qpn);
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN,
- &cmd_base_qpn, sizeof(cmd_base_qpn),
- &cmd_base_qpn, &out_size);
- if (err || out_size != sizeof(cmd_base_qpn) || cmd_base_qpn.status) {
- dev_err(&pdev->dev, "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x\n",
- err, cmd_base_qpn.status, out_size);
- return -EIO;
- }
-
- *base_qpn = cmd_base_qpn.qpn;
- return 0;
-}
-
-/**
- * hinic_hwdev_ifup - Preparing the HW for passing IO
- * @hwdev: the NIC HW device
- * @sq_depth: the send queue depth
- * @rq_depth: the receive queue depth
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
- struct hinic_hwif *hwif = hwdev->hwif;
- int err, num_aeqs, num_ceqs, num_qps;
- struct msix_entry *ceq_msix_entries;
- struct msix_entry *sq_msix_entries;
- struct msix_entry *rq_msix_entries;
- struct pci_dev *pdev = hwif->pdev;
- u16 base_qpn;
-
- err = get_base_qpn(hwdev, &base_qpn);
- if (err) {
- dev_err(&pdev->dev, "Failed to get global base qp number\n");
- return err;
- }
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
-
- ceq_msix_entries = &hwdev->msix_entries[num_aeqs];
- func_to_io->hwdev = hwdev;
- func_to_io->sq_depth = sq_depth;
- func_to_io->rq_depth = rq_depth;
- func_to_io->global_qpn = base_qpn;
-
- err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
- ceq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init IO channel\n");
- return err;
- }
-
- num_qps = nic_cap->num_qps;
- sq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs];
- rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps];
-
- err = hinic_io_create_qps(func_to_io, base_qpn, num_qps,
- sq_msix_entries, rq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to create QPs\n");
- goto err_create_qps;
- }
-
- err = wait_for_db_state(hwdev);
- if (err) {
- dev_warn(&pdev->dev, "db - disabled, try again\n");
- hinic_db_state_set(hwif, HINIC_DB_ENABLE);
- }
-
- err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);
- if (err) {
- dev_err(&pdev->dev, "Failed to set HW IO ctxt\n");
- goto err_hw_ioctxt;
- }
-
- return 0;
-
-err_hw_ioctxt:
- hinic_io_destroy_qps(func_to_io, num_qps);
-
-err_create_qps:
- hinic_io_free(func_to_io);
- return err;
-}
-
-/**
- * hinic_hwdev_ifdown - Closing the HW for passing IO
- * @hwdev: the NIC HW device
- *
- **/
-void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- clear_io_resources(hwdev);
-
- hinic_io_destroy_qps(func_to_io, nic_cap->num_qps);
- hinic_io_free(func_to_io);
-}
-
-/**
- * hinic_hwdev_cb_register - register callback handler for MGMT events
- * @hwdev: the NIC HW device
- * @cmd: the mgmt event
- * @handle: private data for the handler
- * @handler: event handler
- **/
-void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd, void *handle,
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size))
-{
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_nic_cb *nic_cb;
- u8 cmd_cb;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- nic_cb->handler = handler;
- nic_cb->handle = handle;
- nic_cb->cb_state = HINIC_CB_ENABLED;
-}
-
-/**
- * hinic_hwdev_cb_unregister - unregister callback handler for MGMT events
- * @hwdev: the NIC HW device
- * @cmd: the mgmt event
- **/
-void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_nic_cb *nic_cb;
- u8 cmd_cb;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif))
- return;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- nic_cb->cb_state &= ~HINIC_CB_ENABLED;
-
- while (nic_cb->cb_state & HINIC_CB_RUNNING)
- schedule();
-
- nic_cb->handler = NULL;
-}
-
-/**
- * nic_mgmt_msg_handler - nic mgmt event handler
- * @handle: private data for the handler
- * @cmd: message command
- * @buf_in: input buffer
- * @in_size: input size
- * @buf_out: output buffer
- * @out_size: returned output size
- **/
-static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size)
-{
- struct hinic_pfhwdev *pfhwdev = handle;
- enum hinic_cb_state cb_state;
- struct hinic_nic_cb *nic_cb;
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- u8 cmd_cb;
-
- hwdev = &pfhwdev->hwdev;
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
-
- if (cmd < HINIC_MGMT_MSG_CMD_BASE ||
- cmd >= HINIC_MGMT_MSG_CMD_MAX) {
- dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd);
- return;
- }
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
-
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- cb_state = cmpxchg(&nic_cb->cb_state,
- HINIC_CB_ENABLED,
- HINIC_CB_ENABLED | HINIC_CB_RUNNING);
-
- if (cb_state == HINIC_CB_ENABLED && nic_cb->handler)
- nic_cb->handler(nic_cb->handle, buf_in,
- in_size, buf_out, out_size);
- else
- dev_err(&pdev->dev, "Unhandled NIC Event %d\n", cmd);
-
- nic_cb->cb_state &= ~HINIC_CB_RUNNING;
-}
-
-static void hinic_comm_recv_mgmt_self_cmd_reg(struct hinic_pfhwdev *pfhwdev,
- u8 cmd,
- comm_mgmt_self_msg_proc proc)
-{
- u8 cmd_idx;
-
- cmd_idx = pfhwdev->proc.cmd_num;
- if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) {
- dev_err(&pfhwdev->hwdev.hwif->pdev->dev,
- "Register recv mgmt process failed, cmd: 0x%x\n", cmd);
- return;
- }
-
- pfhwdev->proc.info[cmd_idx].cmd = cmd;
- pfhwdev->proc.info[cmd_idx].proc = proc;
- pfhwdev->proc.cmd_num++;
-}
-
-static void hinic_comm_recv_mgmt_self_cmd_unreg(struct hinic_pfhwdev *pfhwdev,
- u8 cmd)
-{
- u8 cmd_idx;
-
- cmd_idx = pfhwdev->proc.cmd_num;
- if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) {
- dev_err(&pfhwdev->hwdev.hwif->pdev->dev, "Unregister recv mgmt process failed, cmd: 0x%x\n",
- cmd);
- return;
- }
-
- for (cmd_idx = 0; cmd_idx < HINIC_COMM_SELF_CMD_MAX; cmd_idx++) {
- if (cmd == pfhwdev->proc.info[cmd_idx].cmd) {
- pfhwdev->proc.info[cmd_idx].cmd = 0;
- pfhwdev->proc.info[cmd_idx].proc = NULL;
- pfhwdev->proc.cmd_num--;
- }
- }
-}
-
-static void comm_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size)
-{
- struct hinic_pfhwdev *pfhwdev = handle;
- u8 cmd_idx;
-
- for (cmd_idx = 0; cmd_idx < pfhwdev->proc.cmd_num; cmd_idx++) {
- if (cmd == pfhwdev->proc.info[cmd_idx].cmd) {
- if (!pfhwdev->proc.info[cmd_idx].proc) {
- dev_warn(&pfhwdev->hwdev.hwif->pdev->dev,
- "PF recv mgmt comm msg handle null, cmd: 0x%x\n",
- cmd);
- } else {
- pfhwdev->proc.info[cmd_idx].proc
- (&pfhwdev->hwdev, buf_in, in_size,
- buf_out, out_size);
- }
-
- return;
- }
- }
-
- dev_warn(&pfhwdev->hwdev.hwif->pdev->dev, "Received unknown mgmt cpu event: 0x%x\n",
- cmd);
-
- *out_size = 0;
-}
-
-/* pf fault report event */
-static void pf_fault_event_handler(void *dev, void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
-{
- struct hinic_cmd_fault_event *fault_event = buf_in;
- struct hinic_hwdev *hwdev = dev;
-
- if (in_size != sizeof(*fault_event)) {
- dev_err(&hwdev->hwif->pdev->dev, "Invalid fault event report, length: %d, should be %zu\n",
- in_size, sizeof(*fault_event));
- return;
- }
-
- if (!hwdev->devlink_dev || IS_ERR_OR_NULL(hwdev->devlink_dev->hw_fault_reporter))
- return;
-
- devlink_health_report(hwdev->devlink_dev->hw_fault_reporter,
- "HW fatal error reported", &fault_event->event);
-}
-
-static void mgmt_watchdog_timeout_event_handler(void *dev,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
-{
- struct hinic_mgmt_watchdog_info *watchdog_info = buf_in;
- struct hinic_hwdev *hwdev = dev;
-
- if (in_size != sizeof(*watchdog_info)) {
- dev_err(&hwdev->hwif->pdev->dev, "Invalid mgmt watchdog report, length: %d, should be %zu\n",
- in_size, sizeof(*watchdog_info));
- return;
- }
-
- if (!hwdev->devlink_dev || IS_ERR_OR_NULL(hwdev->devlink_dev->fw_fault_reporter))
- return;
-
- devlink_health_report(hwdev->devlink_dev->fw_fault_reporter,
- "FW fatal error reported", watchdog_info);
-}
-
-/**
- * init_pfhwdev - Initialize the extended components of PF
- * @pfhwdev: the HW device for PF
- *
- * Return 0 - success, negative - failure
- **/
-static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize PF to MGMT channel\n");
- return err;
- }
-
- err = hinic_func_to_func_init(hwdev);
- if (err) {
- dev_err(&hwif->pdev->dev, "Failed to init mailbox\n");
- hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
- return err;
- }
-
- if (!HINIC_IS_VF(hwif)) {
- hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_L2NIC, pfhwdev,
- nic_mgmt_msg_handler);
- hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- pfhwdev, comm_mgmt_msg_handler);
- hinic_comm_recv_mgmt_self_cmd_reg(pfhwdev,
- HINIC_COMM_CMD_FAULT_REPORT,
- pf_fault_event_handler);
- hinic_comm_recv_mgmt_self_cmd_reg
- (pfhwdev, HINIC_COMM_CMD_WATCHDOG_INFO,
- mgmt_watchdog_timeout_event_handler);
- } else {
- hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
- nic_mgmt_msg_handler);
- }
-
- hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
- hinic_devlink_register(hwdev->devlink_dev);
- return 0;
-}
-
-/**
- * free_pfhwdev - Free the extended components of PF
- * @pfhwdev: the HW device for PF
- **/
-static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
-
- hinic_devlink_unregister(hwdev->devlink_dev);
- hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
-
- if (!HINIC_IS_VF(hwdev->hwif)) {
- hinic_comm_recv_mgmt_self_cmd_unreg(pfhwdev,
- HINIC_COMM_CMD_WATCHDOG_INFO);
- hinic_comm_recv_mgmt_self_cmd_unreg(pfhwdev,
- HINIC_COMM_CMD_FAULT_REPORT);
- hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_COMM);
- hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_L2NIC);
- } else {
- hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
- }
-
- hinic_func_to_func_free(hwdev);
-
- hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
-}
-
-static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_l2nic_reset l2nic_reset = {0};
- u16 out_size = sizeof(l2nic_reset);
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- l2nic_reset.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- /* 0 represents standard l2nic reset flow */
- l2nic_reset.reset_flag = 0;
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_L2NIC_RESET, &l2nic_reset,
- sizeof(l2nic_reset), &l2nic_reset,
- &out_size, HINIC_MGMT_MSG_SYNC);
- if (err || !out_size || l2nic_reset.status) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n",
- err, l2nic_reset.status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info)
-{
- u16 out_size = sizeof(*interrupt_info);
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- if (!hwdev || !interrupt_info)
- return -EINVAL;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- interrupt_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP,
- interrupt_info, sizeof(*interrupt_info),
- interrupt_info, &out_size, HINIC_MGMT_MSG_SYNC);
- if (err || !out_size || interrupt_info->status) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
- err, interrupt_info->status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info)
-{
- u16 out_size = sizeof(*interrupt_info);
- struct hinic_msix_config temp_info;
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- if (!hwdev)
- return -EINVAL;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- interrupt_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
-
- err = hinic_get_interrupt_cfg(hwdev, &temp_info);
- if (err)
- return -EINVAL;
-
- interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt;
- interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt;
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP,
- interrupt_info, sizeof(*interrupt_info),
- interrupt_info, &out_size, HINIC_MGMT_MSG_SYNC);
- if (err || !out_size || interrupt_info->status) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
- err, interrupt_info->status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * hinic_init_hwdev - Initialize the NIC HW
- * @pdev: the NIC pci device
- * @devlink: the poniter of hinic devlink
- *
- * Return initialized NIC HW device
- *
- * Initialize the NIC HW device and return a pointer to it
- **/
-struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devlink)
-{
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
- int err, num_aeqs;
-
- hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL);
- if (!hwif)
- return ERR_PTR(-ENOMEM);
-
- err = hinic_init_hwif(hwif, pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init HW interface\n");
- return ERR_PTR(err);
- }
-
- pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL);
- if (!pfhwdev) {
- err = -ENOMEM;
- goto err_pfhwdev_alloc;
- }
-
- hwdev = &pfhwdev->hwdev;
- hwdev->hwif = hwif;
- hwdev->devlink_dev = devlink_priv(devlink);
- hwdev->devlink_dev->hwdev = hwdev;
-
- err = init_msix(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init msix\n");
- goto err_init_msix;
- }
-
- err = wait_for_outbound_state(hwdev);
- if (err) {
- dev_warn(&pdev->dev, "outbound - disabled, try again\n");
- hinic_outbound_state_set(hwif, HINIC_OUTBOUND_ENABLE);
- }
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
-
- err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs,
- HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE,
- hwdev->msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init async event queues\n");
- goto err_aeqs_init;
- }
-
- err = init_pfhwdev(pfhwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init PF HW device\n");
- goto err_init_pfhwdev;
- }
-
- err = hinic_l2nic_reset(hwdev);
- if (err)
- goto err_l2nic_reset;
-
- err = get_dev_cap(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to get device capabilities\n");
- goto err_dev_cap;
- }
-
- mutex_init(&hwdev->func_to_io.nic_cfg.cfg_mutex);
-
- err = hinic_vf_func_init(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init nic mbox\n");
- goto err_vf_func_init;
- }
-
- err = init_fw_ctxt(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init function table\n");
- goto err_init_fw_ctxt;
- }
-
- err = set_resources_state(hwdev, HINIC_RES_ACTIVE);
- if (err) {
- dev_err(&pdev->dev, "Failed to set resources state\n");
- goto err_resources_state;
- }
-
- return hwdev;
-
-err_resources_state:
-err_init_fw_ctxt:
- hinic_vf_func_free(hwdev);
-err_vf_func_init:
-err_l2nic_reset:
-err_dev_cap:
- free_pfhwdev(pfhwdev);
-
-err_init_pfhwdev:
- hinic_aeqs_free(&hwdev->aeqs);
-
-err_aeqs_init:
- disable_msix(hwdev);
-
-err_init_msix:
-err_pfhwdev_alloc:
- hinic_free_hwif(hwif);
- if (err > 0)
- err = -EIO;
- return ERR_PTR(err);
-}
-
-/**
- * hinic_free_hwdev - Free the NIC HW device
- * @hwdev: the NIC HW device
- **/
-void hinic_free_hwdev(struct hinic_hwdev *hwdev)
-{
- struct hinic_pfhwdev *pfhwdev = container_of(hwdev,
- struct hinic_pfhwdev,
- hwdev);
-
- set_resources_state(hwdev, HINIC_RES_CLEAN);
-
- hinic_vf_func_free(hwdev);
-
- free_pfhwdev(pfhwdev);
-
- hinic_aeqs_free(&hwdev->aeqs);
-
- disable_msix(hwdev);
-
- hinic_free_hwif(hwdev->hwif);
-}
-
-/**
- * hinic_hwdev_num_qps - return the number QPs available for use
- * @hwdev: the NIC HW device
- *
- * Return number QPs available for use
- **/
-int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- return nic_cap->num_qps;
-}
-
-/**
- * hinic_hwdev_get_sq - get SQ
- * @hwdev: the NIC HW device
- * @i: the position of the SQ
- *
- * Return: the SQ in the i position
- **/
-struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_qp *qp = &func_to_io->qps[i];
-
- if (i >= hinic_hwdev_num_qps(hwdev))
- return NULL;
-
- return &qp->sq;
-}
-
-/**
- * hinic_hwdev_get_rq - get RQ
- * @hwdev: the NIC HW device
- * @i: the position of the RQ
- *
- * Return: the RQ in the i position
- **/
-struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_qp *qp = &func_to_io->qps[i];
-
- if (i >= hinic_hwdev_num_qps(hwdev))
- return NULL;
-
- return &qp->rq;
-}
-
-/**
- * hinic_hwdev_msix_cnt_set - clear message attribute counters for msix entry
- * @hwdev: the NIC HW device
- * @msix_index: msix_index
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index)
-{
- return hinic_msix_attr_cnt_clear(hwdev->hwif, msix_index);
-}
-
-/**
- * hinic_hwdev_msix_set - set message attribute for msix entry
- * @hwdev: the NIC HW device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer)
-{
- return hinic_msix_attr_set(hwdev->hwif, msix_index,
- pending_limit, coalesc_timer,
- lli_timer_cfg, lli_credit_limit,
- resend_timer);
-}
-
-/**
- * hinic_hwdev_hw_ci_addr_set - set cons idx addr and attributes in HW for sq
- * @hwdev: the NIC HW device
- * @sq: send queue
- * @pending_limit: the maximum pending update ci events (unit 8)
- * @coalesc_timer: coalesc period for update ci (unit 8 us)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
- u8 pending_limit, u8 coalesc_timer)
-{
- struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_cmd_hw_ci hw_ci;
-
- hw_ci.dma_attr_off = 0;
- hw_ci.pending_limit = pending_limit;
- hw_ci.coalesc_timer = coalesc_timer;
-
- hw_ci.msix_en = 1;
- hw_ci.msix_entry_idx = sq->msix_entry;
-
- hw_ci.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- hw_ci.sq_id = qp->q_id;
-
- hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_COMM,
- HINIC_COMM_CMD_SQ_HI_CI_SET,
- &hw_ci, sizeof(hw_ci), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
-
-/**
- * hinic_hwdev_set_msix_state- set msix state
- * @hwdev: the NIC HW device
- * @msix_index: IRQ corresponding index number
- * @flag: msix state
- *
- **/
-void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
- enum hinic_msix_state flag)
-{
- hinic_set_msix_state(hwdev->hwif, msix_index, flag);
-}
-
-int hinic_get_board_info(struct hinic_hwdev *hwdev,
- struct hinic_comm_board_info *board_info)
-{
- u16 out_size = sizeof(*board_info);
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- if (!hwdev || !board_info)
- return -EINVAL;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_GET_BOARD_INFO,
- board_info, sizeof(*board_info),
- board_info, &out_size, HINIC_MGMT_MSG_SYNC);
- if (err || board_info->status || !out_size) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n",
- err, board_info->status, out_size);
- return -EIO;
- }
-
- return 0;
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
deleted file mode 100644
index 6b5797e69781..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ /dev/null
@@ -1,660 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_DEV_H
-#define HINIC_HW_DEV_H
-
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <net/devlink.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-#include "hinic_hw_mbox.h"
-
-#define HINIC_MAX_QPS 32
-
-#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \
- HINIC_MGMT_MSG_CMD_BASE)
-
-#define HINIC_PF_SET_VF_ALREADY 0x4
-#define HINIC_MGMT_STATUS_EXIST 0x6
-#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF
-
-#define HINIC_CMD_VER_FUNC_ID 2
-
-struct hinic_cap {
- u16 max_qps;
- u16 num_qps;
- u8 max_vf;
- u16 max_vf_qps;
-};
-
-enum hw_ioctxt_set_cmdq_depth {
- HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
- HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE,
-};
-
-enum hinic_port_cmd {
- HINIC_PORT_CMD_VF_REGISTER = 0x0,
- HINIC_PORT_CMD_VF_UNREGISTER = 0x1,
-
- HINIC_PORT_CMD_CHANGE_MTU = 0x2,
-
- HINIC_PORT_CMD_ADD_VLAN = 0x3,
- HINIC_PORT_CMD_DEL_VLAN = 0x4,
-
- HINIC_PORT_CMD_SET_ETS = 0x7,
- HINIC_PORT_CMD_GET_ETS = 0x8,
-
- HINIC_PORT_CMD_SET_PFC = 0x5,
-
- HINIC_PORT_CMD_SET_MAC = 0x9,
- HINIC_PORT_CMD_GET_MAC = 0xA,
- HINIC_PORT_CMD_DEL_MAC = 0xB,
-
- HINIC_PORT_CMD_SET_RX_MODE = 0xC,
-
- HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE = 0xD,
-
- HINIC_PORT_CMD_GET_PAUSE_INFO = 0x14,
- HINIC_PORT_CMD_SET_PAUSE_INFO = 0x15,
-
- HINIC_PORT_CMD_GET_LINK_STATE = 0x18,
-
- HINIC_PORT_CMD_SET_LRO = 0x19,
-
- HINIC_PORT_CMD_SET_RX_CSUM = 0x1A,
-
- HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 0x1B,
-
- HINIC_PORT_CMD_GET_PORT_STATISTICS = 0x1C,
-
- HINIC_PORT_CMD_CLEAR_PORT_STATISTICS = 0x1D,
-
- HINIC_PORT_CMD_GET_VPORT_STAT = 0x1E,
-
- HINIC_PORT_CMD_CLEAN_VPORT_STAT = 0x1F,
-
- HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 0x25,
-
- HINIC_PORT_CMD_SET_PORT_STATE = 0x29,
- HINIC_PORT_CMD_GET_PORT_STATE = 0x30,
-
- HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 0x2B,
-
- HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL = 0x2C,
-
- HINIC_PORT_CMD_SET_RSS_HASH_ENGINE = 0x2D,
-
- HINIC_PORT_CMD_GET_RSS_HASH_ENGINE = 0x2E,
-
- HINIC_PORT_CMD_GET_RSS_CTX_TBL = 0x2F,
-
- HINIC_PORT_CMD_SET_RSS_CTX_TBL = 0x30,
-
- HINIC_PORT_CMD_RSS_TEMP_MGR = 0x31,
-
- HINIC_PORT_CMD_RD_LINE_TBL = 0x39,
-
- HINIC_PORT_CMD_RSS_CFG = 0x42,
-
- HINIC_PORT_CMD_GET_PHY_TYPE = 0x44,
-
- HINIC_PORT_CMD_FWCTXT_INIT = 0x45,
-
- HINIC_PORT_CMD_GET_LOOPBACK_MODE = 0x48,
- HINIC_PORT_CMD_SET_LOOPBACK_MODE = 0x49,
-
- HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4A,
- HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE = 0x4B,
-
- HINIC_PORT_CMD_ENABLE_SPOOFCHK = 0x4E,
-
- HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58,
-
- HINIC_PORT_CMD_GET_PORT_TYPE = 0x5B,
-
- HINIC_PORT_CMD_SET_FUNC_STATE = 0x5D,
-
- HINIC_PORT_CMD_GET_PORT_ID_BY_FUNC_ID = 0x5E,
-
- HINIC_PORT_CMD_GET_DMA_CS = 0x64,
- HINIC_PORT_CMD_SET_DMA_CS = 0x65,
-
- HINIC_PORT_CMD_GET_GLOBAL_QPN = 0x66,
-
- HINIC_PORT_CMD_SET_VF_RATE = 0x69,
-
- HINIC_PORT_CMD_SET_VF_VLAN = 0x6A,
-
- HINIC_PORT_CMD_CLR_VF_VLAN = 0x6B,
-
- HINIC_PORT_CMD_SET_TSO = 0x70,
-
- HINIC_PORT_CMD_UPDATE_FW = 0x72,
-
- HINIC_PORT_CMD_SET_RQ_IQ_MAP = 0x73,
-
- HINIC_PORT_CMD_SET_PFC_THD = 0x75,
-
- HINIC_PORT_CMD_LINK_STATUS_REPORT = 0xA0,
-
- HINIC_PORT_CMD_SET_LOSSLESS_ETH = 0xA3,
-
- HINIC_PORT_CMD_UPDATE_MAC = 0xA4,
-
- HINIC_PORT_CMD_GET_CAP = 0xAA,
-
- HINIC_PORT_CMD_UP_TC_ADD_FLOW = 0xAF,
- HINIC_PORT_CMD_UP_TC_DEL_FLOW = 0xB0,
- HINIC_PORT_CMD_UP_TC_GET_FLOW = 0xB1,
-
- HINIC_PORT_CMD_UP_TC_FLUSH_TCAM = 0xB2,
-
- HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK = 0xB3,
-
- HINIC_PORT_CMD_UP_TC_ENABLE = 0xB4,
-
- HINIC_PORT_CMD_UP_TC_GET_TCAM_BLOCK = 0xB5,
-
- HINIC_PORT_CMD_SET_IPSU_MAC = 0xCB,
- HINIC_PORT_CMD_GET_IPSU_MAC = 0xCC,
-
- HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4,
-
- HINIC_PORT_CMD_GET_LINK_MODE = 0xD9,
-
- HINIC_PORT_CMD_SET_SPEED = 0xDA,
-
- HINIC_PORT_CMD_SET_AUTONEG = 0xDB,
-
- HINIC_PORT_CMD_CLEAR_QP_RES = 0xDD,
-
- HINIC_PORT_CMD_SET_SUPER_CQE = 0xDE,
-
- HINIC_PORT_CMD_SET_VF_COS = 0xDF,
- HINIC_PORT_CMD_GET_VF_COS = 0xE1,
-
- HINIC_PORT_CMD_CABLE_PLUG_EVENT = 0xE5,
-
- HINIC_PORT_CMD_LINK_ERR_EVENT = 0xE6,
-
- HINIC_PORT_CMD_SET_COS_UP_MAP = 0xE8,
-
- HINIC_PORT_CMD_RESET_LINK_CFG = 0xEB,
-
- HINIC_PORT_CMD_GET_STD_SFP_INFO = 0xF0,
-
- HINIC_PORT_CMD_FORCE_PKT_DROP = 0xF3,
-
- HINIC_PORT_CMD_SET_LRO_TIMER = 0xF4,
-
- HINIC_PORT_CMD_SET_VHD_CFG = 0xF7,
-
- HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8,
-
- HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 0xF9,
-
- HINIC_PORT_CMD_GET_SFP_ABS = 0xFB,
-
- HINIC_PORT_CMD_Q_FILTER = 0xFC,
-
- HINIC_PORT_CMD_TCAM_FILTER = 0xFE,
-
- HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF,
-};
-
-/* cmd of mgmt CPU message for HILINK module */
-enum hinic_hilink_cmd {
- HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
- HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
-};
-
-enum hinic_ucode_cmd {
- HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT = 0,
- HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
- HINIC_UCODE_CMD_ARM_SQ,
- HINIC_UCODE_CMD_ARM_RQ,
- HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
- HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
- HINIC_UCODE_CMD_GET_RSS_INDIR_TABLE,
- HINIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE,
- HINIC_UCODE_CMD_SET_IQ_ENABLE,
- HINIC_UCODE_CMD_SET_RQ_FLUSH = 10
-};
-
-#define NIC_RSS_CMD_TEMP_ALLOC 0x01
-#define NIC_RSS_CMD_TEMP_FREE 0x02
-
-enum hinic_mgmt_msg_cmd {
- HINIC_MGMT_MSG_CMD_BASE = 0xA0,
-
- HINIC_MGMT_MSG_CMD_LINK_STATUS = 0xA0,
-
- HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT = 0xE5,
- HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT = 0xE6,
-
- HINIC_MGMT_MSG_CMD_MAX,
-};
-
-enum hinic_cb_state {
- HINIC_CB_ENABLED = BIT(0),
- HINIC_CB_RUNNING = BIT(1),
-};
-
-enum hinic_res_state {
- HINIC_RES_CLEAN = 0,
- HINIC_RES_ACTIVE = 1,
-};
-
-struct hinic_cmd_fw_ctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rx_buf_sz;
-
- u32 rsvd1;
-};
-
-struct hinic_cmd_hw_ioctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
-
- u16 rsvd1;
-
- u8 set_cmdq_depth;
- u8 cmdq_depth;
-
- u8 lro_en;
- u8 rsvd3;
- u8 ppf_idx;
- u8 rsvd4;
-
- u16 rq_depth;
- u16 rx_buf_sz_idx;
- u16 sq_depth;
-};
-
-struct hinic_cmd_io_status {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 rsvd1;
- u8 rsvd2;
- u32 io_status;
-};
-
-struct hinic_cmd_clear_io_res {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 rsvd1;
- u8 rsvd2;
-};
-
-struct hinic_cmd_set_res_state {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 state;
- u8 rsvd1;
- u32 rsvd2;
-};
-
-struct hinic_ceq_ctrl_reg {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 q_id;
- u32 ctrl0;
- u32 ctrl1;
-};
-
-struct hinic_cmd_base_qpn {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 qpn;
-};
-
-struct hinic_cmd_hw_ci {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
-
- u8 dma_attr_off;
- u8 pending_limit;
- u8 coalesc_timer;
-
- u8 msix_en;
- u16 msix_entry_idx;
-
- u32 sq_id;
- u32 rsvd1;
- u64 ci_addr;
-};
-
-struct hinic_cmd_l2nic_reset {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 reset_flag;
-};
-
-struct hinic_msix_config {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 msix_index;
- u8 pending_cnt;
- u8 coalesce_timer_cnt;
- u8 lli_timer_cnt;
- u8 lli_credit_cnt;
- u8 resend_timer_cnt;
- u8 rsvd1[3];
-};
-
-struct hinic_set_random_id {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 vf_in_pf;
- u8 rsvd1;
- u16 func_idx;
- u32 random_id;
-};
-
-struct hinic_board_info {
- u32 board_type;
- u32 port_num;
- u32 port_speed;
- u32 pcie_width;
- u32 host_num;
- u32 pf_num;
- u32 vf_total_num;
- u32 tile_num;
- u32 qcm_num;
- u32 core_num;
- u32 work_mode;
- u32 service_mode;
- u32 pcie_mode;
- u32 cfg_addr;
- u32 boot_sel;
- u32 board_id;
-};
-
-struct hinic_comm_board_info {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- struct hinic_board_info info;
-
- u32 rsvd1[4];
-};
-
-struct hinic_hwdev {
- struct hinic_hwif *hwif;
- struct msix_entry *msix_entries;
-
- struct hinic_aeqs aeqs;
- struct hinic_func_to_io func_to_io;
- struct hinic_mbox_func_to_func *func_to_func;
-
- struct hinic_cap nic_cap;
- u8 port_id;
- struct hinic_devlink_priv *devlink_dev;
-};
-
-struct hinic_nic_cb {
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size);
-
- void *handle;
- unsigned long cb_state;
-};
-
-#define HINIC_COMM_SELF_CMD_MAX 4
-
-typedef void (*comm_mgmt_self_msg_proc)(void *handle, void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size);
-
-struct comm_mgmt_self_msg_sub_info {
- u8 cmd;
- comm_mgmt_self_msg_proc proc;
-};
-
-struct comm_mgmt_self_msg_info {
- u8 cmd_num;
- struct comm_mgmt_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX];
-};
-
-struct hinic_pfhwdev {
- struct hinic_hwdev hwdev;
-
- struct hinic_pf_to_mgmt pf_to_mgmt;
-
- struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD];
-
- struct comm_mgmt_self_msg_info proc;
-};
-
-struct hinic_dev_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 rsvd1[5];
- u8 intr_type;
- u8 max_cos_id;
- u8 er_id;
- u8 port_id;
- u8 max_vf;
- u8 rsvd2[62];
- u16 max_sqs;
- u16 max_rqs;
- u16 max_vf_sqs;
- u16 max_vf_rqs;
- u8 rsvd3[204];
-};
-
-union hinic_fault_hw_mgmt {
- u32 val[4];
- /* valid only type == FAULT_TYPE_CHIP */
- struct {
- u8 node_id;
- u8 err_level;
- u16 err_type;
- u32 err_csr_addr;
- u32 err_csr_value;
- /* func_id valid only if err_level == FAULT_LEVEL_SERIOUS_FLR */
- u16 func_id;
- u16 rsvd2;
- } chip;
-
- /* valid only if type == FAULT_TYPE_UCODE */
- struct {
- u8 cause_id;
- u8 core_id;
- u8 c_id;
- u8 rsvd3;
- u32 epc;
- u32 rsvd4;
- u32 rsvd5;
- } ucode;
-
- /* valid only if type == FAULT_TYPE_MEM_RD_TIMEOUT ||
- * FAULT_TYPE_MEM_WR_TIMEOUT
- */
- struct {
- u32 err_csr_ctrl;
- u32 err_csr_data;
- u32 ctrl_tab;
- u32 mem_index;
- } mem_timeout;
-
- /* valid only if type == FAULT_TYPE_REG_RD_TIMEOUT ||
- * FAULT_TYPE_REG_WR_TIMEOUT
- */
- struct {
- u32 err_csr;
- u32 rsvd6;
- u32 rsvd7;
- u32 rsvd8;
- } reg_timeout;
-
- struct {
- /* 0: read; 1: write */
- u8 op_type;
- u8 port_id;
- u8 dev_ad;
- u8 rsvd9;
- u32 csr_addr;
- u32 op_data;
- u32 rsvd10;
- } phy_fault;
-};
-
-struct hinic_fault_event {
- u8 type;
- u8 fault_level;
- u8 rsvd0[2];
- union hinic_fault_hw_mgmt event;
-};
-
-struct hinic_cmd_fault_event {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- struct hinic_fault_event event;
-};
-
-enum hinic_fault_type {
- FAULT_TYPE_CHIP,
- FAULT_TYPE_UCODE,
- FAULT_TYPE_MEM_RD_TIMEOUT,
- FAULT_TYPE_MEM_WR_TIMEOUT,
- FAULT_TYPE_REG_RD_TIMEOUT,
- FAULT_TYPE_REG_WR_TIMEOUT,
- FAULT_TYPE_PHY_FAULT,
- FAULT_TYPE_MAX,
-};
-
-enum hinic_fault_err_level {
- FAULT_LEVEL_FATAL,
- FAULT_LEVEL_SERIOUS_RESET,
- FAULT_LEVEL_SERIOUS_FLR,
- FAULT_LEVEL_GENERAL,
- FAULT_LEVEL_SUGGESTION,
- FAULT_LEVEL_MAX
-};
-
-struct hinic_mgmt_watchdog_info {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u32 curr_time_h;
- u32 curr_time_l;
- u32 task_id;
- u32 rsv;
-
- u32 reg[13];
- u32 pc;
- u32 lr;
- u32 cpsr;
-
- u32 stack_top;
- u32 stack_bottom;
- u32 sp;
- u32 curr_used;
- u32 peak_used;
- u32 is_overflow;
-
- u32 stack_actlen;
- u8 data[1024];
-};
-
-void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd, void *handle,
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size));
-
-void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd);
-
-int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
- void *buf_in, u16 in_size, void *buf_out,
- u16 *out_size);
-
-int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
- void *buf_in, u16 in_size, void *buf_out,
- u16 *out_size);
-
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
-
-void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev);
-
-struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devlink);
-
-void hinic_free_hwdev(struct hinic_hwdev *hwdev);
-
-int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
-
-struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
-
-struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i);
-
-int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index);
-
-int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer);
-
-int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
- u8 pending_limit, u8 coalesc_timer);
-
-void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
- enum hinic_msix_state flag);
-
-int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info);
-
-int hinic_get_board_info(struct hinic_hwdev *hwdev,
- struct hinic_comm_board_info *board_info);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
deleted file mode 100644
index 045c47786a04..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ /dev/null
@@ -1,996 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/log2.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_dev.h"
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-
-#define HINIC_EQS_WQ_NAME "hinic_eqs"
-
-#define GET_EQ_NUM_PAGES(eq, pg_size) \
- (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))
-
-#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size)
-
-#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
- HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
-
-#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
- HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
-
-#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
- HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
-
-#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
- HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
-
-#define GET_EQ_ELEMENT(eq, idx) \
- ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
- (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
-
-#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \
- GET_EQ_ELEMENT(eq, idx))
-
-#define GET_CEQ_ELEM(eq, idx) ((u32 *) \
- GET_EQ_ELEMENT(eq, idx))
-
-#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx)
-
-#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx)
-
-#define PAGE_IN_4K(page_size) ((page_size) >> 12)
-#define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size)))
-
-#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
-#define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq)))
-
-#define EQ_MAX_PAGES 8
-
-#define CEQE_TYPE_SHIFT 23
-#define CEQE_TYPE_MASK 0x7
-
-#define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \
- CEQE_TYPE_MASK)
-
-#define CEQE_DATA_MASK 0x3FFFFFF
-#define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK)
-
-#define aeq_to_aeqs(eq) \
- container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
-
-#define ceq_to_ceqs(eq) \
- container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
-
-#define work_to_aeq_work(work) \
- container_of(work, struct hinic_eq_work, work)
-
-#define DMA_ATTR_AEQ_DEFAULT 0
-#define DMA_ATTR_CEQ_DEFAULT 0
-
-/* No coalescence */
-#define THRESH_CEQ_DEFAULT 0
-
-enum eq_int_mode {
- EQ_INT_MODE_ARMED,
- EQ_INT_MODE_ALWAYS
-};
-
-enum eq_arm_state {
- EQ_NOT_ARMED,
- EQ_ARMED
-};
-
-/**
- * hinic_aeq_register_hw_cb - register AEQ callback for specific event
- * @aeqs: pointer to Async eqs of the chip
- * @event: aeq event to register callback for it
- * @handle: private data will be used by the callback
- * @hwe_handler: callback function
- **/
-void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event, void *handle,
- void (*hwe_handler)(void *handle, void *data,
- u8 size))
-{
- struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
-
- hwe_cb->hwe_handler = hwe_handler;
- hwe_cb->handle = handle;
- hwe_cb->hwe_state = HINIC_EQE_ENABLED;
-}
-
-/**
- * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event
- * @aeqs: pointer to Async eqs of the chip
- * @event: aeq event to unregister callback for it
- **/
-void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event)
-{
- struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
-
- hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED;
-
- while (hwe_cb->hwe_state & HINIC_EQE_RUNNING)
- schedule();
-
- hwe_cb->hwe_handler = NULL;
-}
-
-/**
- * hinic_ceq_register_cb - register CEQ callback for specific event
- * @ceqs: pointer to Completion eqs part of the chip
- * @event: ceq event to register callback for it
- * @handle: private data will be used by the callback
- * @handler: callback function
- **/
-void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event, void *handle,
- void (*handler)(void *handle, u32 ceqe_data))
-{
- struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
-
- ceq_cb->handler = handler;
- ceq_cb->handle = handle;
- ceq_cb->ceqe_state = HINIC_EQE_ENABLED;
-}
-
-/**
- * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event
- * @ceqs: pointer to Completion eqs part of the chip
- * @event: ceq event to unregister callback for it
- **/
-void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event)
-{
- struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
-
- ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED;
-
- while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING)
- schedule();
-
- ceq_cb->handler = NULL;
-}
-
-static u8 eq_cons_idx_checksum_set(u32 val)
-{
- u8 checksum = 0;
- int idx;
-
- for (idx = 0; idx < 32; idx += 4)
- checksum ^= ((val >> idx) & 0xF);
-
- return (checksum & 0xF);
-}
-
-/**
- * eq_update_ci - update the HW cons idx of event queue
- * @eq: the event queue to update the cons idx for
- * @arm_state: the arm bit value of eq's interrupt
- **/
-static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
-{
- u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
-
- /* Read Modify Write */
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_EQ_CI_CLEAR(val, IDX) &
- HINIC_EQ_CI_CLEAR(val, WRAPPED) &
- HINIC_EQ_CI_CLEAR(val, INT_ARMED) &
- HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM);
-
- val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) |
- HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
- HINIC_EQ_CI_SET(arm_state, INT_ARMED);
-
- val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
-}
-
-/**
- * aeq_irq_handler - handler for the AEQ event
- * @eq: the Async Event Queue that received the event
- **/
-static void aeq_irq_handler(struct hinic_eq *eq)
-{
- struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
- struct hinic_hwif *hwif = aeqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_aeq_elem *aeqe_curr;
- struct hinic_hw_event_cb *hwe_cb;
- enum hinic_aeq_type event;
- unsigned long eqe_state;
- u32 aeqe_desc;
- int i, size;
-
- for (i = 0; i < eq->q_len; i++) {
- aeqe_curr = GET_CURR_AEQ_ELEM(eq);
-
- /* Data in HW is in Big endian Format */
- aeqe_desc = be32_to_cpu(aeqe_curr->desc);
-
- /* HW toggles the wrapped bit, when it adds eq element */
- if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
- break;
-
- dma_rmb();
-
- event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
- if (event >= HINIC_MAX_AEQ_EVENTS) {
- dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
- return;
- }
-
- if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
- hwe_cb = &aeqs->hwe_cb[event];
-
- size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
-
- eqe_state = cmpxchg(&hwe_cb->hwe_state,
- HINIC_EQE_ENABLED,
- HINIC_EQE_ENABLED |
- HINIC_EQE_RUNNING);
- if (eqe_state == HINIC_EQE_ENABLED &&
- hwe_cb->hwe_handler)
- hwe_cb->hwe_handler(hwe_cb->handle,
- aeqe_curr->data, size);
- else
- dev_err(&pdev->dev, "Unhandled AEQ Event %d\n",
- event);
-
- hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING;
- }
-
- eq->cons_idx++;
-
- if (eq->cons_idx == eq->q_len) {
- eq->cons_idx = 0;
- eq->wrapped = !eq->wrapped;
- }
- }
-}
-
-/**
- * ceq_event_handler - handler for the ceq events
- * @ceqs: ceqs part of the chip
- * @ceqe: ceq element that describes the event
- **/
-static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
-{
- struct hinic_hwif *hwif = ceqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_ceq_cb *ceq_cb;
- enum hinic_ceq_type event;
- unsigned long eqe_state;
-
- event = CEQE_TYPE(ceqe);
- if (event >= HINIC_MAX_CEQ_EVENTS) {
- dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event);
- return;
- }
-
- ceq_cb = &ceqs->ceq_cb[event];
-
- eqe_state = cmpxchg(&ceq_cb->ceqe_state,
- HINIC_EQE_ENABLED,
- HINIC_EQE_ENABLED | HINIC_EQE_RUNNING);
-
- if (eqe_state == HINIC_EQE_ENABLED && ceq_cb->handler)
- ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe));
- else
- dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);
-
- ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING;
-}
-
-/**
- * ceq_irq_handler - handler for the CEQ event
- * @eq: the Completion Event Queue that received the event
- **/
-static void ceq_irq_handler(struct hinic_eq *eq)
-{
- struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
- u32 ceqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- ceqe = *(GET_CURR_CEQ_ELEM(eq));
-
- /* Data in HW is in Big endian Format */
- ceqe = be32_to_cpu(ceqe);
-
- /* HW toggles the wrapped bit, when it adds eq element event */
- if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
- break;
-
- ceq_event_handler(ceqs, ceqe);
-
- eq->cons_idx++;
-
- if (eq->cons_idx == eq->q_len) {
- eq->cons_idx = 0;
- eq->wrapped = !eq->wrapped;
- }
- }
-}
-
-/**
- * eq_irq_handler - handler for the EQ event
- * @data: the Event Queue that received the event
- **/
-static void eq_irq_handler(void *data)
-{
- struct hinic_eq *eq = data;
-
- if (eq->type == HINIC_AEQ)
- aeq_irq_handler(eq);
- else if (eq->type == HINIC_CEQ)
- ceq_irq_handler(eq);
-
- eq_update_ci(eq, EQ_ARMED);
-}
-
-/**
- * eq_irq_work - the work of the EQ that received the event
- * @work: the work struct that is associated with the EQ
- **/
-static void eq_irq_work(struct work_struct *work)
-{
- struct hinic_eq_work *aeq_work = work_to_aeq_work(work);
- struct hinic_eq *aeq;
-
- aeq = aeq_work->data;
- eq_irq_handler(aeq);
-}
-
-/**
- * ceq_tasklet - the tasklet of the EQ that received the event
- * @t: the tasklet struct pointer
- **/
-static void ceq_tasklet(struct tasklet_struct *t)
-{
- struct hinic_eq *ceq = from_tasklet(ceq, t, ceq_tasklet);
-
- eq_irq_handler(ceq);
-}
-
-/**
- * aeq_interrupt - aeq interrupt handler
- * @irq: irq number
- * @data: the Async Event Queue that collected the event
- **/
-static irqreturn_t aeq_interrupt(int irq, void *data)
-{
- struct hinic_eq_work *aeq_work;
- struct hinic_eq *aeq = data;
- struct hinic_aeqs *aeqs;
-
- /* clear resend timer cnt register */
- hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);
-
- aeq_work = &aeq->aeq_work;
- aeq_work->data = aeq;
-
- aeqs = aeq_to_aeqs(aeq);
- queue_work(aeqs->workq, &aeq_work->work);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ceq_interrupt - ceq interrupt handler
- * @irq: irq number
- * @data: the Completion Event Queue that collected the event
- **/
-static irqreturn_t ceq_interrupt(int irq, void *data)
-{
- struct hinic_eq *ceq = data;
-
- /* clear resend timer cnt register */
- hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);
-
- tasklet_schedule(&ceq->ceq_tasklet);
-
- return IRQ_HANDLED;
-}
-
-static u32 get_ctrl0_val(struct hinic_eq *eq, u32 addr)
-{
- struct msix_entry *msix_entry = &eq->msix_entry;
- enum hinic_eq_type type = eq->type;
- u32 val, ctrl0;
-
- if (type == HINIC_AEQ) {
- /* RMW Ctrl0 */
- addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) &
- HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
- HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
- HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE);
-
- ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) |
- HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) |
- HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
- PCI_INTF_IDX) |
- HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);
-
- val |= ctrl0;
- } else {
- /* RMW Ctrl0 */
- addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) &
- HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
- HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) &
- HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
- HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE);
-
- ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) |
- HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) |
- HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) |
- HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
- PCI_INTF_IDX) |
- HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);
-
- val |= ctrl0;
- }
- return val;
-}
-
-static void set_ctrl0(struct hinic_eq *eq)
-{
- u32 val, addr;
-
- if (eq->type == HINIC_AEQ)
- addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
- else
- addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
-
- val = get_ctrl0_val(eq, addr);
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
-}
-
-static u32 get_ctrl1_val(struct hinic_eq *eq, u32 addr)
-{
- u32 page_size_val, elem_size, val, ctrl1;
- enum hinic_eq_type type = eq->type;
-
- if (type == HINIC_AEQ) {
- /* RMW Ctrl1 */
- addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
-
- page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
- elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) &
- HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) &
- HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
-
- ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) |
- HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
- HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
-
- val |= ctrl1;
- } else {
- /* RMW Ctrl1 */
- addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
-
- page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) &
- HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
-
- ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) |
- HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
-
- val |= ctrl1;
- }
- return val;
-}
-
-static void set_ctrl1(struct hinic_eq *eq)
-{
- u32 addr, val;
-
- if (eq->type == HINIC_AEQ)
- addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
- else
- addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
-
- val = get_ctrl1_val(eq, addr);
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
-}
-
-static int set_ceq_ctrl_reg(struct hinic_eq *eq)
-{
- struct hinic_ceq_ctrl_reg ceq_ctrl = {0};
- struct hinic_hwdev *hwdev = eq->hwdev;
- u16 out_size = sizeof(ceq_ctrl);
- u16 in_size = sizeof(ceq_ctrl);
- struct hinic_pfhwdev *pfhwdev;
- u32 addr;
- int err;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
- ceq_ctrl.ctrl0 = get_ctrl0_val(eq, addr);
- addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
- ceq_ctrl.ctrl1 = get_ctrl1_val(eq, addr);
-
- ceq_ctrl.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- ceq_ctrl.q_id = eq->q_id;
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP,
- &ceq_ctrl, in_size,
- &ceq_ctrl, &out_size, HINIC_MGMT_MSG_SYNC);
- if (err || !out_size || ceq_ctrl.status) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
- eq->q_id, err, ceq_ctrl.status, out_size);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * set_eq_ctrls - setting eq's ctrl registers
- * @eq: the Event Queue for setting
- **/
-static int set_eq_ctrls(struct hinic_eq *eq)
-{
- if (HINIC_IS_VF(eq->hwif) && eq->type == HINIC_CEQ)
- return set_ceq_ctrl_reg(eq);
-
- set_ctrl0(eq);
- set_ctrl1(eq);
- return 0;
-}
-
-/**
- * aeq_elements_init - initialize all the elements in the aeq
- * @eq: the Async Event Queue
- * @init_val: value to initialize the elements with it
- **/
-static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
-{
- struct hinic_aeq_elem *aeqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- aeqe = GET_AEQ_ELEM(eq, i);
- aeqe->desc = cpu_to_be32(init_val);
- }
-
- wmb(); /* Write the initilzation values */
-}
-
-/**
- * ceq_elements_init - Initialize all the elements in the ceq
- * @eq: the event queue
- * @init_val: value to init with it the elements
- **/
-static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
-{
- u32 *ceqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- ceqe = GET_CEQ_ELEM(eq, i);
- *(ceqe) = cpu_to_be32(init_val);
- }
-
- wmb(); /* Write the initilzation values */
-}
-
-/**
- * alloc_eq_pages - allocate the pages for the queue
- * @eq: the event queue
- *
- * Return 0 - Success, Negative - Failure
- **/
-static int alloc_eq_pages(struct hinic_eq *eq)
-{
- struct hinic_hwif *hwif = eq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u32 init_val, addr, val;
- int err, pg;
-
- eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
- sizeof(*eq->dma_addr), GFP_KERNEL);
- if (!eq->dma_addr)
- return -ENOMEM;
-
- eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
- sizeof(*eq->virt_addr), GFP_KERNEL);
- if (!eq->virt_addr) {
- err = -ENOMEM;
- goto err_virt_addr_alloc;
- }
-
- for (pg = 0; pg < eq->num_pages; pg++) {
- eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
- eq->page_size,
- &eq->dma_addr[pg],
- GFP_KERNEL);
- if (!eq->virt_addr[pg]) {
- err = -ENOMEM;
- goto err_dma_alloc;
- }
-
- addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
- val = upper_32_bits(eq->dma_addr[pg]);
-
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
- val = lower_32_bits(eq->dma_addr[pg]);
-
- hinic_hwif_write_reg(hwif, addr, val);
- }
-
- init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED);
-
- if (eq->type == HINIC_AEQ)
- aeq_elements_init(eq, init_val);
- else if (eq->type == HINIC_CEQ)
- ceq_elements_init(eq, init_val);
-
- return 0;
-
-err_dma_alloc:
- while (--pg >= 0)
- dma_free_coherent(&pdev->dev, eq->page_size,
- eq->virt_addr[pg],
- eq->dma_addr[pg]);
-
- devm_kfree(&pdev->dev, eq->virt_addr);
-
-err_virt_addr_alloc:
- devm_kfree(&pdev->dev, eq->dma_addr);
- return err;
-}
-
-/**
- * free_eq_pages - free the pages of the queue
- * @eq: the Event Queue
- **/
-static void free_eq_pages(struct hinic_eq *eq)
-{
- struct hinic_hwif *hwif = eq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int pg;
-
- for (pg = 0; pg < eq->num_pages; pg++)
- dma_free_coherent(&pdev->dev, eq->page_size,
- eq->virt_addr[pg],
- eq->dma_addr[pg]);
-
- devm_kfree(&pdev->dev, eq->virt_addr);
- devm_kfree(&pdev->dev, eq->dma_addr);
-}
-
-/**
- * init_eq - initialize Event Queue
- * @eq: the event queue
- * @hwif: the HW interface of a PCI function device
- * @type: the type of the event queue, aeq or ceq
- * @q_id: Queue id number
- * @q_len: the number of EQ elements
- * @page_size: the page size of the pages in the event queue
- * @entry: msix entry associated with the event queue
- *
- * Return 0 - Success, Negative - Failure
- **/
-static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
- enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size,
- struct msix_entry entry)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- eq->hwif = hwif;
- eq->type = type;
- eq->q_id = q_id;
- eq->q_len = q_len;
- eq->page_size = page_size;
-
- /* Clear PI and CI, also clear the ARM bit */
- hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
- hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
-
- eq->cons_idx = 0;
- eq->wrapped = 0;
-
- if (type == HINIC_AEQ) {
- eq->elem_size = HINIC_AEQE_SIZE;
- } else if (type == HINIC_CEQ) {
- eq->elem_size = HINIC_CEQE_SIZE;
- } else {
- dev_err(&pdev->dev, "Invalid EQ type\n");
- return -EINVAL;
- }
-
- eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
- eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size);
-
- eq->msix_entry = entry;
-
- if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
- dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
- return -EINVAL;
- }
-
- if (eq->num_pages > EQ_MAX_PAGES) {
- dev_err(&pdev->dev, "too many pages for eq\n");
- return -EINVAL;
- }
-
- err = set_eq_ctrls(eq);
- if (err) {
- dev_err(&pdev->dev, "Failed to set eq ctrls\n");
- return err;
- }
-
- eq_update_ci(eq, EQ_ARMED);
-
- err = alloc_eq_pages(eq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
- return err;
- }
-
- if (type == HINIC_AEQ) {
- struct hinic_eq_work *aeq_work = &eq->aeq_work;
-
- INIT_WORK(&aeq_work->work, eq_irq_work);
- } else if (type == HINIC_CEQ) {
- tasklet_setup(&eq->ceq_tasklet, ceq_tasklet);
- }
-
- /* set the attributes of the msix entry */
- hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
- HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT,
- HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT,
- HINIC_EQ_MSIX_LLI_TIMER_DEFAULT,
- HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
- HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);
-
- if (type == HINIC_AEQ) {
- snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_aeq%d@pci:%s", eq->q_id,
- pci_name(pdev));
- err = request_irq(entry.vector, aeq_interrupt, 0, eq->irq_name, eq);
- } else if (type == HINIC_CEQ) {
- snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_ceq%d@pci:%s", eq->q_id,
- pci_name(pdev));
- err = request_irq(entry.vector, ceq_interrupt, 0, eq->irq_name, eq);
- }
-
- if (err) {
- dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
- goto err_req_irq;
- }
-
- return 0;
-
-err_req_irq:
- free_eq_pages(eq);
- return err;
-}
-
-/**
- * remove_eq - remove Event Queue
- * @eq: the event queue
- **/
-static void remove_eq(struct hinic_eq *eq)
-{
- hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
- HINIC_MSIX_DISABLE);
- free_irq(eq->msix_entry.vector, eq);
-
- if (eq->type == HINIC_AEQ) {
- struct hinic_eq_work *aeq_work = &eq->aeq_work;
-
- cancel_work_sync(&aeq_work->work);
- /* clear aeq_len to avoid hw access host memory */
- hinic_hwif_write_reg(eq->hwif,
- HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
- } else if (eq->type == HINIC_CEQ) {
- tasklet_kill(&eq->ceq_tasklet);
- /* clear ceq_len to avoid hw access host memory */
- hinic_hwif_write_reg(eq->hwif,
- HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
- }
-
- /* update cons_idx to avoid invalid interrupt */
- eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
- eq_update_ci(eq, EQ_NOT_ARMED);
-
- free_eq_pages(eq);
-}
-
-/**
- * hinic_aeqs_init - initialize all the aeqs
- * @aeqs: pointer to Async eqs of the chip
- * @hwif: the HW interface of a PCI function device
- * @num_aeqs: number of AEQs
- * @q_len: number of EQ elements
- * @page_size: the page size of the pages in the event queue
- * @msix_entries: msix entries associated with the event queues
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
- int num_aeqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err, i, q_id;
-
- aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
- if (!aeqs->workq)
- return -ENOMEM;
-
- aeqs->hwif = hwif;
- aeqs->num_aeqs = num_aeqs;
-
- for (q_id = 0; q_id < num_aeqs; q_id++) {
- err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len,
- page_size, msix_entries[q_id]);
- if (err) {
- dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id);
- goto err_init_aeq;
- }
- }
-
- return 0;
-
-err_init_aeq:
- for (i = 0; i < q_id; i++)
- remove_eq(&aeqs->aeq[i]);
-
- destroy_workqueue(aeqs->workq);
- return err;
-}
-
-/**
- * hinic_aeqs_free - free all the aeqs
- * @aeqs: pointer to Async eqs of the chip
- **/
-void hinic_aeqs_free(struct hinic_aeqs *aeqs)
-{
- int q_id;
-
- for (q_id = 0; q_id < aeqs->num_aeqs ; q_id++)
- remove_eq(&aeqs->aeq[q_id]);
-
- destroy_workqueue(aeqs->workq);
-}
-
-/**
- * hinic_ceqs_init - init all the ceqs
- * @ceqs: ceqs part of the chip
- * @hwif: the hardware interface of a pci function device
- * @num_ceqs: number of CEQs
- * @q_len: number of EQ elements
- * @page_size: the page size of the event queue
- * @msix_entries: msix entries associated with the event queues
- *
- * Return 0 - Success, Negative - Failure
- **/
-int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
- int num_ceqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i, q_id, err;
-
- ceqs->hwif = hwif;
- ceqs->num_ceqs = num_ceqs;
-
- for (q_id = 0; q_id < num_ceqs; q_id++) {
- ceqs->ceq[q_id].hwdev = ceqs->hwdev;
- err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
- page_size, msix_entries[q_id]);
- if (err) {
- dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id);
- goto err_init_ceq;
- }
- }
-
- return 0;
-
-err_init_ceq:
- for (i = 0; i < q_id; i++)
- remove_eq(&ceqs->ceq[i]);
-
- return err;
-}
-
-/**
- * hinic_ceqs_free - free all the ceqs
- * @ceqs: ceqs part of the chip
- **/
-void hinic_ceqs_free(struct hinic_ceqs *ceqs)
-{
- int q_id;
-
- for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
- remove_eq(&ceqs->ceq[q_id]);
-}
-
-void hinic_dump_ceq_info(struct hinic_hwdev *hwdev)
-{
- struct hinic_eq *eq = NULL;
- u32 addr, ci, pi;
- int q_id;
-
- for (q_id = 0; q_id < hwdev->func_to_io.ceqs.num_ceqs; q_id++) {
- eq = &hwdev->func_to_io.ceqs.ceq[q_id];
- addr = EQ_CONS_IDX_REG_ADDR(eq);
- ci = hinic_hwif_read_reg(hwdev->hwif, addr);
- addr = EQ_PROD_IDX_REG_ADDR(eq);
- pi = hinic_hwif_read_reg(hwdev->hwif, addr);
- dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n",
- q_id, ci, eq->cons_idx, pi,
- eq->ceq_tasklet.state,
- eq->wrapped, be32_to_cpu(*(__be32 *)(GET_CURR_CEQ_ELEM(eq))));
- }
-}
-
-void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
-{
- struct hinic_aeq_elem *aeqe_pos = NULL;
- struct hinic_eq *eq = NULL;
- u32 addr, ci, pi;
- int q_id;
-
- for (q_id = 0; q_id < hwdev->aeqs.num_aeqs; q_id++) {
- eq = &hwdev->aeqs.aeq[q_id];
- addr = EQ_CONS_IDX_REG_ADDR(eq);
- ci = hinic_hwif_read_reg(hwdev->hwif, addr);
- addr = EQ_PROD_IDX_REG_ADDR(eq);
- pi = hinic_hwif_read_reg(hwdev->hwif, addr);
- aeqe_pos = GET_CURR_AEQ_ELEM(eq);
- dev_err(&hwdev->hwif->pdev->dev, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n",
- q_id, ci, pi, work_busy(&eq->aeq_work.work),
- eq->wrapped, be32_to_cpu(aeqe_pos->desc));
- }
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
deleted file mode 100644
index 2f3222174fc7..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_EQS_H
-#define HINIC_HW_EQS_H
-
-#include <linux/types.h>
-#include <linux/workqueue.h>
-#include <linux/pci.h>
-#include <linux/sizes.h>
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-
-#include "hinic_hw_if.h"
-
-#define HINIC_AEQ_CTRL_0_INT_IDX_SHIFT 0
-#define HINIC_AEQ_CTRL_0_DMA_ATTR_SHIFT 12
-#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20
-#define HINIC_AEQ_CTRL_0_INT_MODE_SHIFT 31
-
-#define HINIC_AEQ_CTRL_0_INT_IDX_MASK 0x3FF
-#define HINIC_AEQ_CTRL_0_DMA_ATTR_MASK 0x3F
-#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_AEQ_CTRL_0_INT_MODE_MASK 0x1
-
-#define HINIC_AEQ_CTRL_0_SET(val, member) \
- (((u32)(val) & HINIC_AEQ_CTRL_0_##member##_MASK) << \
- HINIC_AEQ_CTRL_0_##member##_SHIFT)
-
-#define HINIC_AEQ_CTRL_0_CLEAR(val, member) \
- ((val) & (~(HINIC_AEQ_CTRL_0_##member##_MASK \
- << HINIC_AEQ_CTRL_0_##member##_SHIFT)))
-
-#define HINIC_AEQ_CTRL_1_LEN_SHIFT 0
-#define HINIC_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
-#define HINIC_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
-
-#define HINIC_AEQ_CTRL_1_LEN_MASK 0x1FFFFF
-#define HINIC_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3
-#define HINIC_AEQ_CTRL_1_PAGE_SIZE_MASK 0xF
-
-#define HINIC_AEQ_CTRL_1_SET(val, member) \
- (((u32)(val) & HINIC_AEQ_CTRL_1_##member##_MASK) << \
- HINIC_AEQ_CTRL_1_##member##_SHIFT)
-
-#define HINIC_AEQ_CTRL_1_CLEAR(val, member) \
- ((val) & (~(HINIC_AEQ_CTRL_1_##member##_MASK \
- << HINIC_AEQ_CTRL_1_##member##_SHIFT)))
-
-#define HINIC_CEQ_CTRL_0_INTR_IDX_SHIFT 0
-#define HINIC_CEQ_CTRL_0_DMA_ATTR_SHIFT 12
-#define HINIC_CEQ_CTRL_0_KICK_THRESH_SHIFT 20
-#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24
-#define HINIC_CEQ_CTRL_0_INTR_MODE_SHIFT 31
-
-#define HINIC_CEQ_CTRL_0_INTR_IDX_MASK 0x3FF
-#define HINIC_CEQ_CTRL_0_DMA_ATTR_MASK 0x3F
-#define HINIC_CEQ_CTRL_0_KICK_THRESH_MASK 0xF
-#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_CEQ_CTRL_0_INTR_MODE_MASK 0x1
-
-#define HINIC_CEQ_CTRL_0_SET(val, member) \
- (((u32)(val) & HINIC_CEQ_CTRL_0_##member##_MASK) << \
- HINIC_CEQ_CTRL_0_##member##_SHIFT)
-
-#define HINIC_CEQ_CTRL_0_CLEAR(val, member) \
- ((val) & (~(HINIC_CEQ_CTRL_0_##member##_MASK \
- << HINIC_CEQ_CTRL_0_##member##_SHIFT)))
-
-#define HINIC_CEQ_CTRL_1_LEN_SHIFT 0
-#define HINIC_CEQ_CTRL_1_PAGE_SIZE_SHIFT 28
-
-#define HINIC_CEQ_CTRL_1_LEN_MASK 0x1FFFFF
-#define HINIC_CEQ_CTRL_1_PAGE_SIZE_MASK 0xF
-
-#define HINIC_CEQ_CTRL_1_SET(val, member) \
- (((u32)(val) & HINIC_CEQ_CTRL_1_##member##_MASK) << \
- HINIC_CEQ_CTRL_1_##member##_SHIFT)
-
-#define HINIC_CEQ_CTRL_1_CLEAR(val, member) \
- ((val) & (~(HINIC_CEQ_CTRL_1_##member##_MASK \
- << HINIC_CEQ_CTRL_1_##member##_SHIFT)))
-
-#define HINIC_EQ_ELEM_DESC_TYPE_SHIFT 0
-#define HINIC_EQ_ELEM_DESC_SRC_SHIFT 7
-#define HINIC_EQ_ELEM_DESC_SIZE_SHIFT 8
-#define HINIC_EQ_ELEM_DESC_WRAPPED_SHIFT 31
-
-#define HINIC_EQ_ELEM_DESC_TYPE_MASK 0x7F
-#define HINIC_EQ_ELEM_DESC_SRC_MASK 0x1
-#define HINIC_EQ_ELEM_DESC_SIZE_MASK 0xFF
-#define HINIC_EQ_ELEM_DESC_WRAPPED_MASK 0x1
-
-#define HINIC_EQ_ELEM_DESC_SET(val, member) \
- (((u32)(val) & HINIC_EQ_ELEM_DESC_##member##_MASK) << \
- HINIC_EQ_ELEM_DESC_##member##_SHIFT)
-
-#define HINIC_EQ_ELEM_DESC_GET(val, member) \
- (((val) >> HINIC_EQ_ELEM_DESC_##member##_SHIFT) & \
- HINIC_EQ_ELEM_DESC_##member##_MASK)
-
-#define HINIC_EQ_CI_IDX_SHIFT 0
-#define HINIC_EQ_CI_WRAPPED_SHIFT 20
-#define HINIC_EQ_CI_XOR_CHKSUM_SHIFT 24
-#define HINIC_EQ_CI_INT_ARMED_SHIFT 31
-
-#define HINIC_EQ_CI_IDX_MASK 0xFFFFF
-#define HINIC_EQ_CI_WRAPPED_MASK 0x1
-#define HINIC_EQ_CI_XOR_CHKSUM_MASK 0xF
-#define HINIC_EQ_CI_INT_ARMED_MASK 0x1
-
-#define HINIC_EQ_CI_SET(val, member) \
- (((u32)(val) & HINIC_EQ_CI_##member##_MASK) << \
- HINIC_EQ_CI_##member##_SHIFT)
-
-#define HINIC_EQ_CI_CLEAR(val, member) \
- ((val) & (~(HINIC_EQ_CI_##member##_MASK \
- << HINIC_EQ_CI_##member##_SHIFT)))
-
-#define HINIC_MAX_AEQS 4
-#define HINIC_MAX_CEQS 32
-
-#define HINIC_AEQE_SIZE 64
-#define HINIC_CEQE_SIZE 4
-
-#define HINIC_AEQE_DESC_SIZE 4
-#define HINIC_AEQE_DATA_SIZE \
- (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)
-
-#define HINIC_DEFAULT_AEQ_LEN 64
-#define HINIC_DEFAULT_CEQ_LEN 1024
-
-#define HINIC_EQ_PAGE_SIZE SZ_4K
-
-#define HINIC_CEQ_ID_CMDQ 0
-
-enum hinic_eq_type {
- HINIC_AEQ,
- HINIC_CEQ,
-};
-
-enum hinic_aeq_type {
- HINIC_MBX_FROM_FUNC = 1,
- HINIC_MSG_FROM_MGMT_CPU = 2,
- HINIC_MBX_SEND_RSLT = 5,
- HINIC_MAX_AEQ_EVENTS,
-};
-
-enum hinic_ceq_type {
- HINIC_CEQ_CMDQ = 3,
-
- HINIC_MAX_CEQ_EVENTS,
-};
-
-enum hinic_eqe_state {
- HINIC_EQE_ENABLED = BIT(0),
- HINIC_EQE_RUNNING = BIT(1),
-};
-
-struct hinic_aeq_elem {
- u8 data[HINIC_AEQE_DATA_SIZE];
- __be32 desc;
-};
-
-struct hinic_eq_work {
- struct work_struct work;
- void *data;
-};
-
-struct hinic_eq {
- struct hinic_hwif *hwif;
- struct hinic_hwdev *hwdev;
- enum hinic_eq_type type;
- int q_id;
- u32 q_len;
- u32 page_size;
-
- u32 cons_idx;
- int wrapped;
-
- size_t elem_size;
- int num_pages;
- int num_elem_in_pg;
-
- struct msix_entry msix_entry;
- char irq_name[64];
-
- dma_addr_t *dma_addr;
- void **virt_addr;
-
- struct hinic_eq_work aeq_work;
-
- struct tasklet_struct ceq_tasklet;
-};
-
-struct hinic_hw_event_cb {
- void (*hwe_handler)(void *handle, void *data, u8 size);
- void *handle;
- unsigned long hwe_state;
-};
-
-struct hinic_aeqs {
- struct hinic_hwif *hwif;
-
- struct hinic_eq aeq[HINIC_MAX_AEQS];
- int num_aeqs;
-
- struct hinic_hw_event_cb hwe_cb[HINIC_MAX_AEQ_EVENTS];
-
- struct workqueue_struct *workq;
-};
-
-struct hinic_ceq_cb {
- void (*handler)(void *handle, u32 ceqe_data);
- void *handle;
- enum hinic_eqe_state ceqe_state;
-};
-
-struct hinic_ceqs {
- struct hinic_hwif *hwif;
- struct hinic_hwdev *hwdev;
- struct hinic_eq ceq[HINIC_MAX_CEQS];
- int num_ceqs;
-
- struct hinic_ceq_cb ceq_cb[HINIC_MAX_CEQ_EVENTS];
-};
-
-void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event, void *handle,
- void (*hwe_handler)(void *handle, void *data,
- u8 size));
-
-void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event);
-
-void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event, void *handle,
- void (*ceq_cb)(void *handle, u32 ceqe_data));
-
-void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event);
-
-int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
- int num_aeqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries);
-
-void hinic_aeqs_free(struct hinic_aeqs *aeqs);
-
-int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
- int num_ceqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries);
-
-void hinic_ceqs_free(struct hinic_ceqs *ceqs);
-
-void hinic_dump_ceq_info(struct hinic_hwdev *hwdev);
-
-void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
deleted file mode 100644
index 88567305d06e..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ /dev/null
@@ -1,419 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-
-#define PCIE_ATTR_ENTRY 0
-
-#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
-
-#define WAIT_HWIF_READY_TIMEOUT 10000
-
-#define HINIC_SELFTEST_RESULT 0x883C
-
-/**
- * hinic_msix_attr_set - set message attribute for msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer, u8 lli_credit_limit,
- u8 resend_timer)
-{
- u32 msix_ctrl, addr;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) |
- HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) |
- HINIC_MSIX_ATTR_SET(lli_timer, LLI_TIMER) |
- HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) |
- HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER);
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
-
- hinic_hwif_write_reg(hwif, addr, msix_ctrl);
- return 0;
-}
-
-/**
- * hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
-{
- u32 msix_ctrl, addr;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- msix_ctrl = HINIC_MSIX_CNT_SET(1, RESEND_TIMER);
- addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index);
-
- hinic_hwif_write_reg(hwif, addr, msix_ctrl);
- return 0;
-}
-
-/**
- * hinic_set_pf_action - set action on pf channel
- * @hwif: the HW interface of a pci function device
- * @action: action on pf channel
- **/
-void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
-{
- u32 attr5;
-
- if (HINIC_IS_VF(hwif))
- return;
-
- attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
- attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION);
- attr5 |= HINIC_FA5_SET(action, PF_ACTION);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5);
-}
-
-enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- return HINIC_FA4_GET(attr4, OUTBOUND_STATE);
-}
-
-void hinic_outbound_state_set(struct hinic_hwif *hwif,
- enum hinic_outbound_state outbound_state)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- attr4 = HINIC_FA4_CLEAR(attr4, OUTBOUND_STATE);
- attr4 |= HINIC_FA4_SET(outbound_state, OUTBOUND_STATE);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
-}
-
-enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- return HINIC_FA4_GET(attr4, DB_STATE);
-}
-
-void hinic_db_state_set(struct hinic_hwif *hwif,
- enum hinic_db_state db_state)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- attr4 = HINIC_FA4_CLEAR(attr4, DB_STATE);
- attr4 |= HINIC_FA4_SET(db_state, DB_STATE);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
-}
-
-void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
- enum hinic_msix_state flag)
-{
- u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE +
- HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL;
- u32 mask_bits;
-
- mask_bits = readl(hwif->intr_regs_base + offset);
- mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
-
- if (flag)
- mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
-
- writel(mask_bits, hwif->intr_regs_base + offset);
-}
-
-/**
- * hwif_ready - test if the HW is ready for use
- * @hwif: the HW interface of a pci function device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int hwif_ready(struct hinic_hwif *hwif)
-{
- u32 addr, attr1;
-
- addr = HINIC_CSR_FUNC_ATTR1_ADDR;
- attr1 = hinic_hwif_read_reg(hwif, addr);
-
- if (!HINIC_FA1_GET(attr1, MGMT_INIT_STATUS))
- return -EBUSY;
-
- if (HINIC_IS_VF(hwif)) {
- if (!HINIC_FA1_GET(attr1, PF_INIT_STATUS))
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int wait_hwif_ready(struct hinic_hwif *hwif)
-{
- unsigned long timeout = 0;
-
- do {
- if (!hwif_ready(hwif))
- return 0;
-
- usleep_range(999, 1000);
- timeout++;
- } while (timeout <= WAIT_HWIF_READY_TIMEOUT);
-
- dev_err(&hwif->pdev->dev, "Wait for hwif timeout\n");
-
- return -EBUSY;
-}
-
-/**
- * set_hwif_attr - set the attributes in the relevant members in hwif
- * @hwif: the HW interface of a pci function device
- * @attr0: the first attribute that was read from the hw
- * @attr1: the second attribute that was read from the hw
- * @attr2: the third attribute that was read from the hw
- **/
-static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
- u32 attr2)
-{
- hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX);
- hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX);
- hwif->attr.pci_intf_idx = HINIC_FA0_GET(attr0, PCI_INTF_IDX);
- hwif->attr.func_type = HINIC_FA0_GET(attr0, FUNC_TYPE);
-
- hwif->attr.num_aeqs = BIT(HINIC_FA1_GET(attr1, AEQS_PER_FUNC));
- hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC));
- hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC));
- hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC));
- hwif->attr.global_vf_id_of_pf = HINIC_FA2_GET(attr2,
- GLOBAL_VF_ID_OF_PF);
-}
-
-/**
- * read_hwif_attr - read the attributes and set members in hwif
- * @hwif: the HW interface of a pci function device
- **/
-static void read_hwif_attr(struct hinic_hwif *hwif)
-{
- u32 addr, attr0, attr1, attr2;
-
- addr = HINIC_CSR_FUNC_ATTR0_ADDR;
- attr0 = hinic_hwif_read_reg(hwif, addr);
-
- addr = HINIC_CSR_FUNC_ATTR1_ADDR;
- attr1 = hinic_hwif_read_reg(hwif, addr);
-
- addr = HINIC_CSR_FUNC_ATTR2_ADDR;
- attr2 = hinic_hwif_read_reg(hwif, addr);
-
- set_hwif_attr(hwif, attr0, attr1, attr2);
-}
-
-/**
- * set_ppf - try to set hwif as ppf and set the type of hwif in this case
- * @hwif: the HW interface of a pci function device
- **/
-static void set_ppf(struct hinic_hwif *hwif)
-{
- struct hinic_func_attr *attr = &hwif->attr;
- u32 addr, val, ppf_election;
-
- /* Read Modify Write */
- addr = HINIC_CSR_PPF_ELECTION_ADDR(HINIC_HWIF_PCI_INTF(hwif));
-
- val = hinic_hwif_read_reg(hwif, addr);
- val = HINIC_PPF_ELECTION_CLEAR(val, IDX);
-
- ppf_election = HINIC_PPF_ELECTION_SET(HINIC_HWIF_FUNC_IDX(hwif), IDX);
-
- val |= ppf_election;
- hinic_hwif_write_reg(hwif, addr, val);
-
- /* check PPF */
- val = hinic_hwif_read_reg(hwif, addr);
-
- attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX);
- if (attr->ppf_idx == HINIC_HWIF_FUNC_IDX(hwif))
- attr->func_type = HINIC_PPF;
-}
-
-/**
- * set_dma_attr - set the dma attributes in the HW
- * @hwif: the HW interface of a pci function device
- * @entry_idx: the entry index in the dma table
- * @st: PCIE TLP steering tag
- * @at: PCIE TLP AT field
- * @ph: PCIE TLP Processing Hint field
- * @no_snooping: PCIE TLP No snooping
- * @tph_en: PCIE TLP Processing Hint Enable
- **/
-static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx,
- u8 st, u8 at, u8 ph,
- enum hinic_pcie_nosnoop no_snooping,
- enum hinic_pcie_tph tph_en)
-{
- u32 addr, val, dma_attr_entry;
-
- /* Read Modify Write */
- addr = HINIC_CSR_DMA_ATTR_ADDR(entry_idx);
-
- val = hinic_hwif_read_reg(hwif, addr);
- val = HINIC_DMA_ATTR_CLEAR(val, ST) &
- HINIC_DMA_ATTR_CLEAR(val, AT) &
- HINIC_DMA_ATTR_CLEAR(val, PH) &
- HINIC_DMA_ATTR_CLEAR(val, NO_SNOOPING) &
- HINIC_DMA_ATTR_CLEAR(val, TPH_EN);
-
- dma_attr_entry = HINIC_DMA_ATTR_SET(st, ST) |
- HINIC_DMA_ATTR_SET(at, AT) |
- HINIC_DMA_ATTR_SET(ph, PH) |
- HINIC_DMA_ATTR_SET(no_snooping, NO_SNOOPING) |
- HINIC_DMA_ATTR_SET(tph_en, TPH_EN);
-
- val |= dma_attr_entry;
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * dma_attr_init - initialize the default dma attributes
- * @hwif: the HW interface of a pci function device
- **/
-static void dma_attr_init(struct hinic_hwif *hwif)
-{
- set_dma_attr(hwif, PCIE_ATTR_ENTRY, HINIC_PCIE_ST_DISABLE,
- HINIC_PCIE_AT_DISABLE, HINIC_PCIE_PH_DISABLE,
- HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE);
-}
-
-u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif)
-{
- if (!hwif)
- return 0;
-
- return hwif->attr.global_vf_id_of_pf;
-}
-
-u16 hinic_global_func_id_hw(struct hinic_hwif *hwif)
-{
- u32 addr, attr0;
-
- addr = HINIC_CSR_FUNC_ATTR0_ADDR;
- attr0 = hinic_hwif_read_reg(hwif, addr);
-
- return HINIC_FA0_GET(attr0, FUNC_IDX);
-}
-
-u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif)
-{
- u32 addr, attr0;
-
- addr = HINIC_CSR_FUNC_ATTR0_ADDR;
- attr0 = hinic_hwif_read_reg(hwif, addr);
-
- return HINIC_FA0_GET(attr0, PF_IDX);
-}
-
-static void __print_selftest_reg(struct hinic_hwif *hwif)
-{
- u32 addr, attr0, attr1;
-
- addr = HINIC_CSR_FUNC_ATTR1_ADDR;
- attr1 = hinic_hwif_read_reg(hwif, addr);
-
- if (attr1 == HINIC_PCIE_LINK_DOWN) {
- dev_err(&hwif->pdev->dev, "PCIE is link down\n");
- return;
- }
-
- addr = HINIC_CSR_FUNC_ATTR0_ADDR;
- attr0 = hinic_hwif_read_reg(hwif, addr);
- if (HINIC_FA0_GET(attr0, FUNC_TYPE) != HINIC_VF &&
- !HINIC_FA0_GET(attr0, PCI_INTF_IDX))
- dev_err(&hwif->pdev->dev, "Selftest reg: 0x%08x\n",
- hinic_hwif_read_reg(hwif, HINIC_SELFTEST_RESULT));
-}
-
-/**
- * hinic_init_hwif - initialize the hw interface
- * @hwif: the HW interface of a pci function device
- * @pdev: the pci device for accessing PCI resources
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
-{
- int err;
-
- hwif->pdev = pdev;
-
- hwif->cfg_regs_bar = pci_ioremap_bar(pdev, HINIC_PCI_CFG_REGS_BAR);
- if (!hwif->cfg_regs_bar) {
- dev_err(&pdev->dev, "Failed to map configuration regs\n");
- return -ENOMEM;
- }
-
- hwif->intr_regs_base = pci_ioremap_bar(pdev, HINIC_PCI_INTR_REGS_BAR);
- if (!hwif->intr_regs_base) {
- dev_err(&pdev->dev, "Failed to map configuration regs\n");
- err = -ENOMEM;
- goto err_map_intr_bar;
- }
-
- err = wait_hwif_ready(hwif);
- if (err) {
- dev_err(&pdev->dev, "HW interface is not ready\n");
- __print_selftest_reg(hwif);
- goto err_hwif_ready;
- }
-
- read_hwif_attr(hwif);
-
- if (HINIC_IS_PF(hwif))
- set_ppf(hwif);
-
- /* No transactionss before DMA is initialized */
- dma_attr_init(hwif);
- return 0;
-
-err_hwif_ready:
- iounmap(hwif->intr_regs_base);
-
-err_map_intr_bar:
- iounmap(hwif->cfg_regs_bar);
-
- return err;
-}
-
-/**
- * hinic_free_hwif - free the HW interface
- * @hwif: the HW interface of a pci function device
- **/
-void hinic_free_hwif(struct hinic_hwif *hwif)
-{
- iounmap(hwif->intr_regs_base);
- iounmap(hwif->cfg_regs_bar);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
deleted file mode 100644
index 3d588896a367..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_IF_H
-#define HINIC_HW_IF_H
-
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#define HINIC_PCIE_LINK_DOWN 0xFFFFFFFF
-
-#define HINIC_DMA_ATTR_ST_SHIFT 0
-#define HINIC_DMA_ATTR_AT_SHIFT 8
-#define HINIC_DMA_ATTR_PH_SHIFT 10
-#define HINIC_DMA_ATTR_NO_SNOOPING_SHIFT 12
-#define HINIC_DMA_ATTR_TPH_EN_SHIFT 13
-
-#define HINIC_DMA_ATTR_ST_MASK 0xFF
-#define HINIC_DMA_ATTR_AT_MASK 0x3
-#define HINIC_DMA_ATTR_PH_MASK 0x3
-#define HINIC_DMA_ATTR_NO_SNOOPING_MASK 0x1
-#define HINIC_DMA_ATTR_TPH_EN_MASK 0x1
-
-#define HINIC_DMA_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_DMA_ATTR_##member##_MASK) << \
- HINIC_DMA_ATTR_##member##_SHIFT)
-
-#define HINIC_DMA_ATTR_CLEAR(val, member) \
- ((val) & (~(HINIC_DMA_ATTR_##member##_MASK \
- << HINIC_DMA_ATTR_##member##_SHIFT)))
-
-#define HINIC_FA0_FUNC_IDX_SHIFT 0
-#define HINIC_FA0_PF_IDX_SHIFT 10
-#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14
-#define HINIC_FA0_VF_IN_PF_SHIFT 16
-/* reserved members - off 16 */
-#define HINIC_FA0_FUNC_TYPE_SHIFT 24
-
-#define HINIC_FA0_FUNC_IDX_MASK 0x3FF
-#define HINIC_FA0_PF_IDX_MASK 0xF
-#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_FA0_FUNC_TYPE_MASK 0x1
-#define HINIC_FA0_VF_IN_PF_MASK 0xFF
-
-#define HINIC_FA0_GET(val, member) \
- (((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK)
-
-#define HINIC_FA1_AEQS_PER_FUNC_SHIFT 8
-/* reserved members - off 10 */
-#define HINIC_FA1_CEQS_PER_FUNC_SHIFT 12
-/* reserved members - off 15 */
-#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20
-#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24
-/* reserved members - off 27 */
-#define HINIC_FA1_MGMT_INIT_STATUS_SHIFT 30
-#define HINIC_FA1_PF_INIT_STATUS_SHIFT 31
-
-#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3
-#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7
-#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF
-#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7
-#define HINIC_FA1_MGMT_INIT_STATUS_MASK 0x1
-#define HINIC_FA1_PF_INIT_STATUS_MASK 0x1
-
-#define HINIC_FA1_GET(val, member) \
- (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK)
-
-#define HINIC_FA2_GLOBAL_VF_ID_OF_PF_SHIFT 16
-#define HINIC_FA2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF
-
-#define HINIC_FA2_GET(val, member) \
- (((val) >> HINIC_FA2_##member##_SHIFT) & HINIC_FA2_##member##_MASK)
-
-#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0
-#define HINIC_FA4_DB_STATE_SHIFT 1
-
-#define HINIC_FA4_OUTBOUND_STATE_MASK 0x1
-#define HINIC_FA4_DB_STATE_MASK 0x1
-
-#define HINIC_FA4_GET(val, member) \
- (((val) >> HINIC_FA4_##member##_SHIFT) & HINIC_FA4_##member##_MASK)
-
-#define HINIC_FA4_SET(val, member) \
- ((((u32)val) & HINIC_FA4_##member##_MASK) << HINIC_FA4_##member##_SHIFT)
-
-#define HINIC_FA4_CLEAR(val, member) \
- ((val) & (~(HINIC_FA4_##member##_MASK << HINIC_FA4_##member##_SHIFT)))
-
-#define HINIC_FA5_PF_ACTION_SHIFT 0
-#define HINIC_FA5_PF_ACTION_MASK 0xFFFF
-
-#define HINIC_FA5_SET(val, member) \
- (((u32)(val) & HINIC_FA5_##member##_MASK) << HINIC_FA5_##member##_SHIFT)
-
-#define HINIC_FA5_CLEAR(val, member) \
- ((val) & (~(HINIC_FA5_##member##_MASK << HINIC_FA5_##member##_SHIFT)))
-
-#define HINIC_PPF_ELECTION_IDX_SHIFT 0
-#define HINIC_PPF_ELECTION_IDX_MASK 0x1F
-
-#define HINIC_PPF_ELECTION_SET(val, member) \
- (((u32)(val) & HINIC_PPF_ELECTION_##member##_MASK) << \
- HINIC_PPF_ELECTION_##member##_SHIFT)
-
-#define HINIC_PPF_ELECTION_GET(val, member) \
- (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \
- HINIC_PPF_ELECTION_##member##_MASK)
-
-#define HINIC_PPF_ELECTION_CLEAR(val, member) \
- ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \
- << HINIC_PPF_ELECTION_##member##_SHIFT)))
-
-#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0
-#define HINIC_MSIX_COALESC_TIMER_SHIFT 8
-#define HINIC_MSIX_LLI_TIMER_SHIFT 16
-#define HINIC_MSIX_LLI_CREDIT_SHIFT 24
-#define HINIC_MSIX_RESEND_TIMER_SHIFT 29
-
-#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFF
-#define HINIC_MSIX_COALESC_TIMER_MASK 0xFF
-#define HINIC_MSIX_LLI_TIMER_MASK 0xFF
-#define HINIC_MSIX_LLI_CREDIT_MASK 0x1F
-#define HINIC_MSIX_RESEND_TIMER_MASK 0x7
-
-#define HINIC_MSIX_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_MSIX_##member##_MASK) << \
- HINIC_MSIX_##member##_SHIFT)
-
-#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
-
-#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1
-
-#define HINIC_MSIX_CNT_SET(val, member) \
- (((u32)(val) & HINIC_MSIX_CNT_##member##_MASK) << \
- HINIC_MSIX_CNT_##member##_SHIFT)
-
-#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs)
-#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs)
-#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs)
-#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx)
-#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx)
-#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx)
-#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
-
-#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
-#define HINIC_IS_VF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_VF)
-#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
-#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF)
-
-#define HINIC_PCI_CFG_REGS_BAR 0
-#define HINIC_PCI_INTR_REGS_BAR 2
-#define HINIC_PCI_DB_BAR 4
-
-#define HINIC_PCIE_ST_DISABLE 0
-#define HINIC_PCIE_AT_DISABLE 0
-#define HINIC_PCIE_PH_DISABLE 0
-
-#define HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT 0xFF /* max */
-#define HINIC_EQ_MSIX_LLI_TIMER_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */
-
-#define HINIC_PCI_MSIX_ENTRY_SIZE 16
-#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12
-#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1
-
-enum hinic_pcie_nosnoop {
- HINIC_PCIE_SNOOP = 0,
- HINIC_PCIE_NO_SNOOP = 1,
-};
-
-enum hinic_pcie_tph {
- HINIC_PCIE_TPH_DISABLE = 0,
- HINIC_PCIE_TPH_ENABLE = 1,
-};
-
-enum hinic_func_type {
- HINIC_PF = 0,
- HINIC_VF = 1,
- HINIC_PPF = 2,
-};
-
-enum hinic_mod_type {
- HINIC_MOD_COMM = 0, /* HW communication module */
- HINIC_MOD_L2NIC = 1, /* L2NIC module */
- HINIC_MOD_CFGM = 7, /* Configuration module */
- HINIC_MOD_HILINK = 14, /* Hilink module */
- HINIC_MOD_MAX = 15
-};
-
-enum hinic_node_id {
- HINIC_NODE_ID_MGMT = 21,
-};
-
-enum hinic_pf_action {
- HINIC_PF_MGMT_INIT = 0x0,
-
- HINIC_PF_MGMT_ACTIVE = 0x11,
-};
-
-enum hinic_outbound_state {
- HINIC_OUTBOUND_ENABLE = 0,
- HINIC_OUTBOUND_DISABLE = 1,
-};
-
-enum hinic_db_state {
- HINIC_DB_ENABLE = 0,
- HINIC_DB_DISABLE = 1,
-};
-
-enum hinic_msix_state {
- HINIC_MSIX_ENABLE,
- HINIC_MSIX_DISABLE,
-};
-
-struct hinic_func_attr {
- u16 func_idx;
- u8 pf_idx;
- u8 pci_intf_idx;
-
- enum hinic_func_type func_type;
-
- u8 ppf_idx;
-
- u16 num_irqs;
- u8 num_aeqs;
- u8 num_ceqs;
-
- u8 num_dma_attr;
-
- u16 global_vf_id_of_pf;
-};
-
-struct hinic_hwif {
- struct pci_dev *pdev;
- void __iomem *cfg_regs_bar;
- void __iomem *intr_regs_base;
-
- struct hinic_func_attr attr;
-};
-
-static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg)
-{
- u32 out = readl(hwif->cfg_regs_bar + reg);
-
- return be32_to_cpu(*(__be32 *)&out);
-}
-
-static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg,
- u32 val)
-{
- __be32 in = cpu_to_be32(val);
-
- writel(*(u32 *)&in, hwif->cfg_regs_bar + reg);
-}
-
-int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer);
-
-void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
- enum hinic_msix_state flag);
-
-int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index);
-
-void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action);
-
-enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif);
-
-void hinic_outbound_state_set(struct hinic_hwif *hwif,
- enum hinic_outbound_state outbound_state);
-
-enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif);
-
-void hinic_db_state_set(struct hinic_hwif *hwif,
- enum hinic_db_state db_state);
-
-u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif);
-
-u16 hinic_global_func_id_hw(struct hinic_hwif *hwif);
-
-u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif);
-
-int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev);
-
-void hinic_free_hwif(struct hinic_hwif *hwif);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
deleted file mode 100644
index c4a0ba6e183a..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ /dev/null
@@ -1,634 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/semaphore.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/err.h>
-
-#include "hinic_hw_dev.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define CI_Q_ADDR_SIZE sizeof(u32)
-
-#define CI_ADDR(base_addr, q_id) ((base_addr) + \
- (q_id) * CI_Q_ADDR_SIZE)
-
-#define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE)
-
-#define DB_IDX(db, db_base) \
- (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
-
-#define HINIC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12)))
-
-enum io_cmd {
- IO_CMD_MODIFY_QUEUE_CTXT = 0,
- IO_CMD_CLEAN_QUEUE_CTXT,
-};
-
-static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
-{
- int i;
-
- for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
- free_db_area->db_idx[i] = i;
-
- free_db_area->alloc_pos = 0;
- free_db_area->return_pos = HINIC_DB_MAX_AREAS;
-
- free_db_area->num_free = HINIC_DB_MAX_AREAS;
-
- sema_init(&free_db_area->idx_lock, 1);
-}
-
-static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
-{
- struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
- int pos, idx;
-
- down(&free_db_area->idx_lock);
-
- free_db_area->num_free--;
-
- if (free_db_area->num_free < 0) {
- free_db_area->num_free++;
- up(&free_db_area->idx_lock);
- return ERR_PTR(-ENOMEM);
- }
-
- pos = free_db_area->alloc_pos++;
- pos &= HINIC_DB_MAX_AREAS - 1;
-
- idx = free_db_area->db_idx[pos];
-
- free_db_area->db_idx[pos] = -1;
-
- up(&free_db_area->idx_lock);
-
- return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
-}
-
-static void return_db_area(struct hinic_func_to_io *func_to_io,
- void __iomem *db_base)
-{
- struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
- int pos, idx = DB_IDX(db_base, func_to_io->db_base);
-
- down(&free_db_area->idx_lock);
-
- pos = free_db_area->return_pos++;
- pos &= HINIC_DB_MAX_AREAS - 1;
-
- free_db_area->db_idx[pos] = idx;
-
- free_db_area->num_free++;
-
- up(&free_db_area->idx_lock);
-}
-
-static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_sqs)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct hinic_sq_ctxt_block *sq_ctxt_block;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_buf cmdq_buf;
- struct hinic_sq_ctxt *sq_ctxt;
- struct hinic_qp *qp;
- u64 out_param;
- int err, i;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
- return err;
- }
-
- sq_ctxt_block = cmdq_buf.buf;
- sq_ctxt = sq_ctxt_block->sq_ctxt;
-
- hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
- num_sqs, func_to_io->max_qps);
- for (i = 0; i < num_sqs; i++) {
- qp = &func_to_io->qps[i];
-
- hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
- base_qpn + qp->q_id);
- }
-
- cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
- &out_param);
- if (err || out_param != 0) {
- dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
- err = -EFAULT;
- }
-
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- return err;
-}
-
-static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_rqs)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct hinic_rq_ctxt_block *rq_ctxt_block;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_buf cmdq_buf;
- struct hinic_rq_ctxt *rq_ctxt;
- struct hinic_qp *qp;
- u64 out_param;
- int err, i;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
- return err;
- }
-
- rq_ctxt_block = cmdq_buf.buf;
- rq_ctxt = rq_ctxt_block->rq_ctxt;
-
- hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
- num_rqs, func_to_io->max_qps);
- for (i = 0; i < num_rqs; i++) {
- qp = &func_to_io->qps[i];
-
- hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
- base_qpn + qp->q_id);
- }
-
- cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
- &out_param);
- if (err || out_param != 0) {
- dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
- err = -EFAULT;
- }
-
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- return err;
-}
-
-/**
- * write_qp_ctxts - write the qp ctxt to HW
- * @func_to_io: func to io channel that holds the IO components
- * @base_qpn: first qp number
- * @num_qps: number of qps to write
- *
- * Return 0 - Success, negative - Failure
- **/
-static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_qps)
-{
- return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
- write_rq_ctxts(func_to_io, base_qpn, num_qps));
-}
-
-static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io *func_to_io,
- enum hinic_qp_ctxt_type ctxt_type)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct hinic_clean_queue_ctxt *ctxt_block;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_buf cmdq_buf;
- u64 out_param = 0;
- int err;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
- return err;
- }
-
- ctxt_block = cmdq_buf.buf;
- ctxt_block->cmdq_hdr.num_queues = func_to_io->max_qps;
- ctxt_block->cmdq_hdr.queue_type = ctxt_type;
- ctxt_block->cmdq_hdr.addr_offset = 0;
-
- /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
- ctxt_block->ctxt_size = 0x3;
-
- hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
-
- cmdq_buf.size = sizeof(*ctxt_block);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- IO_CMD_CLEAN_QUEUE_CTXT,
- &cmdq_buf, &out_param);
-
- if (err || out_param) {
- dev_err(&pdev->dev, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n",
- err, out_param);
-
- err = -EFAULT;
- }
-
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
-
- return err;
-}
-
-static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io *func_to_io)
-{
- /* clean LRO/TSO context space */
- return (hinic_clean_queue_offload_ctxt(func_to_io,
- HINIC_QP_CTXT_TYPE_SQ) ||
- hinic_clean_queue_offload_ctxt(func_to_io,
- HINIC_QP_CTXT_TYPE_RQ));
-}
-
-/**
- * init_qp - Initialize a Queue Pair
- * @func_to_io: func to io channel that holds the IO components
- * @qp: pointer to the qp to initialize
- * @q_id: the id of the qp
- * @sq_msix_entry: msix entry for sq
- * @rq_msix_entry: msix entry for rq
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_qp(struct hinic_func_to_io *func_to_io,
- struct hinic_qp *qp, int q_id,
- struct msix_entry *sq_msix_entry,
- struct msix_entry *rq_msix_entry)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- void __iomem *db_base;
- int err;
-
- qp->q_id = q_id;
-
- err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
- HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
- func_to_io->sq_depth, HINIC_SQ_WQE_MAX_SIZE);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
- return err;
- }
-
- err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
- HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
- func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
- goto err_rq_alloc;
- }
-
- db_base = get_db_area(func_to_io);
- if (IS_ERR(db_base)) {
- dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
- err = PTR_ERR(db_base);
- goto err_get_db;
- }
-
- func_to_io->sq_db[q_id] = db_base;
-
- qp->sq.qid = q_id;
- err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
- sq_msix_entry,
- CI_ADDR(func_to_io->ci_addr_base, q_id),
- CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
- if (err) {
- dev_err(&pdev->dev, "Failed to init SQ\n");
- goto err_sq_init;
- }
-
- qp->rq.qid = q_id;
- err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
- rq_msix_entry);
- if (err) {
- dev_err(&pdev->dev, "Failed to init RQ\n");
- goto err_rq_init;
- }
-
- return 0;
-
-err_rq_init:
- hinic_clean_sq(&qp->sq);
-
-err_sq_init:
- return_db_area(func_to_io, db_base);
-
-err_get_db:
- hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
-
-err_rq_alloc:
- hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
- return err;
-}
-
-/**
- * destroy_qp - Clean the resources of a Queue Pair
- * @func_to_io: func to io channel that holds the IO components
- * @qp: pointer to the qp to clean
- **/
-static void destroy_qp(struct hinic_func_to_io *func_to_io,
- struct hinic_qp *qp)
-{
- int q_id = qp->q_id;
-
- hinic_clean_rq(&qp->rq);
- hinic_clean_sq(&qp->sq);
-
- return_db_area(func_to_io, func_to_io->sq_db[q_id]);
-
- hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
- hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
-}
-
-/**
- * hinic_io_create_qps - Create Queue Pairs
- * @func_to_io: func to io channel that holds the IO components
- * @base_qpn: base qp number
- * @num_qps: number queue pairs to create
- * @sq_msix_entries: msix entries for sq
- * @rq_msix_entries: msix entries for rq
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
- u16 base_qpn, int num_qps,
- struct msix_entry *sq_msix_entries,
- struct msix_entry *rq_msix_entries)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- void *ci_addr_base;
- int i, j, err;
-
- func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps,
- sizeof(*func_to_io->qps), GFP_KERNEL);
- if (!func_to_io->qps)
- return -ENOMEM;
-
- func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps,
- sizeof(*func_to_io->sq_wq), GFP_KERNEL);
- if (!func_to_io->sq_wq) {
- err = -ENOMEM;
- goto err_sq_wq;
- }
-
- func_to_io->rq_wq = devm_kcalloc(&pdev->dev, num_qps,
- sizeof(*func_to_io->rq_wq), GFP_KERNEL);
- if (!func_to_io->rq_wq) {
- err = -ENOMEM;
- goto err_rq_wq;
- }
-
- func_to_io->sq_db = devm_kcalloc(&pdev->dev, num_qps,
- sizeof(*func_to_io->sq_db), GFP_KERNEL);
- if (!func_to_io->sq_db) {
- err = -ENOMEM;
- goto err_sq_db;
- }
-
- ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- &func_to_io->ci_dma_base,
- GFP_KERNEL);
- if (!ci_addr_base) {
- dev_err(&pdev->dev, "Failed to allocate CI area\n");
- err = -ENOMEM;
- goto err_ci_base;
- }
-
- func_to_io->ci_addr_base = ci_addr_base;
-
- for (i = 0; i < num_qps; i++) {
- err = init_qp(func_to_io, &func_to_io->qps[i], i,
- &sq_msix_entries[i], &rq_msix_entries[i]);
- if (err) {
- dev_err(&pdev->dev, "Failed to create QP %d\n", i);
- goto err_init_qp;
- }
- }
-
- err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
- if (err) {
- dev_err(&pdev->dev, "Failed to init QP ctxts\n");
- goto err_write_qp_ctxts;
- }
-
- err = hinic_clean_qp_offload_ctxt(func_to_io);
- if (err) {
- dev_err(&pdev->dev, "Failed to clean QP contexts space\n");
- goto err_write_qp_ctxts;
- }
-
- return 0;
-
-err_write_qp_ctxts:
-err_init_qp:
- for (j = 0; j < i; j++)
- destroy_qp(func_to_io, &func_to_io->qps[j]);
-
- dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- func_to_io->ci_addr_base, func_to_io->ci_dma_base);
-
-err_ci_base:
- devm_kfree(&pdev->dev, func_to_io->sq_db);
-
-err_sq_db:
- devm_kfree(&pdev->dev, func_to_io->rq_wq);
-
-err_rq_wq:
- devm_kfree(&pdev->dev, func_to_io->sq_wq);
-
-err_sq_wq:
- devm_kfree(&pdev->dev, func_to_io->qps);
- return err;
-}
-
-/**
- * hinic_io_destroy_qps - Destroy the IO Queue Pairs
- * @func_to_io: func to io channel that holds the IO components
- * @num_qps: number queue pairs to destroy
- **/
-void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t ci_table_size;
- int i;
-
- ci_table_size = CI_TABLE_SIZE(num_qps);
-
- for (i = 0; i < num_qps; i++)
- destroy_qp(func_to_io, &func_to_io->qps[i]);
-
- dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
- func_to_io->ci_dma_base);
-
- devm_kfree(&pdev->dev, func_to_io->sq_db);
-
- devm_kfree(&pdev->dev, func_to_io->rq_wq);
- devm_kfree(&pdev->dev, func_to_io->sq_wq);
-
- devm_kfree(&pdev->dev, func_to_io->qps);
-}
-
-int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
- u32 page_size)
-{
- struct hinic_wq_page_size page_size_info = {0};
- u16 out_size = sizeof(page_size_info);
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- page_size_info.func_idx = func_idx;
- page_size_info.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
- page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_PAGESIZE_SET, &page_size_info,
- sizeof(page_size_info), &page_size_info,
- &out_size, HINIC_MGMT_MSG_SYNC);
- if (err || !out_size || page_size_info.status) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n",
- err, page_size_info.status, out_size);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_io_init - Initialize the IO components
- * @func_to_io: func to io channel that holds the IO components
- * @hwif: HW interface for accessing IO
- * @max_qps: maximum QPs in HW
- * @num_ceqs: number completion event queues
- * @ceq_msix_entries: msix entries for ceqs
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_io_init(struct hinic_func_to_io *func_to_io,
- struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
- struct msix_entry *ceq_msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_cmdq_type cmdq, type;
- void __iomem *db_area;
- int err;
-
- func_to_io->hwif = hwif;
- func_to_io->qps = NULL;
- func_to_io->max_qps = max_qps;
- func_to_io->ceqs.hwdev = func_to_io->hwdev;
-
- err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
- HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
- ceq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init CEQs\n");
- return err;
- }
-
- err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
- goto err_wqs_alloc;
- }
-
- func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
- if (!func_to_io->db_base) {
- dev_err(&pdev->dev, "Failed to remap IO DB area\n");
- err = -ENOMEM;
- goto err_db_ioremap;
- }
-
- init_db_area_idx(&func_to_io->free_db_area);
-
- for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
- db_area = get_db_area(func_to_io);
- if (IS_ERR(db_area)) {
- dev_err(&pdev->dev, "Failed to get cmdq db area\n");
- err = PTR_ERR(db_area);
- goto err_db_area;
- }
-
- func_to_io->cmdq_db_area[cmdq] = db_area;
- }
-
- err = hinic_set_wq_page_size(func_to_io->hwdev,
- HINIC_HWIF_FUNC_IDX(hwif),
- HINIC_DEFAULT_WQ_PAGE_SIZE);
- if (err) {
- dev_err(&func_to_io->hwif->pdev->dev, "Failed to set wq page size\n");
- goto init_wq_pg_size_err;
- }
-
- err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
- func_to_io->cmdq_db_area);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
- goto err_init_cmdqs;
- }
-
- return 0;
-
-err_init_cmdqs:
- if (!HINIC_IS_VF(func_to_io->hwif))
- hinic_set_wq_page_size(func_to_io->hwdev,
- HINIC_HWIF_FUNC_IDX(hwif),
- HINIC_HW_WQ_PAGE_SIZE);
-init_wq_pg_size_err:
-err_db_area:
- for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
- return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);
-
- iounmap(func_to_io->db_base);
-
-err_db_ioremap:
- hinic_wqs_free(&func_to_io->wqs);
-
-err_wqs_alloc:
- hinic_ceqs_free(&func_to_io->ceqs);
- return err;
-}
-
-/**
- * hinic_io_free - Free the IO components
- * @func_to_io: func to io channel that holds the IO components
- **/
-void hinic_io_free(struct hinic_func_to_io *func_to_io)
-{
- enum hinic_cmdq_type cmdq;
-
- hinic_free_cmdqs(&func_to_io->cmdqs);
-
- if (!HINIC_IS_VF(func_to_io->hwif))
- hinic_set_wq_page_size(func_to_io->hwdev,
- HINIC_HWIF_FUNC_IDX(func_to_io->hwif),
- HINIC_HW_WQ_PAGE_SIZE);
-
- for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
- return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
-
- iounmap(func_to_io->db_base);
- hinic_wqs_free(&func_to_io->wqs);
- hinic_ceqs_free(&func_to_io->ceqs);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
deleted file mode 100644
index 52159a90278a..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_IO_H
-#define HINIC_HW_IO_H
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/semaphore.h>
-#include <linux/sizes.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_qp.h"
-
-#define HINIC_DB_PAGE_SIZE SZ_4K
-#define HINIC_DB_SIZE SZ_4M
-#define HINIC_HW_WQ_PAGE_SIZE SZ_4K
-#define HINIC_DEFAULT_WQ_PAGE_SIZE SZ_256K
-
-#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE)
-
-enum hinic_db_type {
- HINIC_DB_CMDQ_TYPE,
- HINIC_DB_SQ_TYPE,
-};
-
-enum hinic_io_path {
- HINIC_CTRL_PATH,
- HINIC_DATA_PATH,
-};
-
-struct hinic_free_db_area {
- int db_idx[HINIC_DB_MAX_AREAS];
-
- int alloc_pos;
- int return_pos;
-
- int num_free;
-
- /* Lock for getting db area */
- struct semaphore idx_lock;
-};
-
-struct hinic_nic_cfg {
- /* lock for getting nic cfg */
- struct mutex cfg_mutex;
- bool pause_set;
- u32 auto_neg;
- u32 rx_pause;
- u32 tx_pause;
-};
-
-struct hinic_func_to_io {
- struct hinic_hwif *hwif;
- struct hinic_hwdev *hwdev;
- u16 global_qpn;
- struct hinic_ceqs ceqs;
-
- struct hinic_wqs wqs;
-
- struct hinic_wq *sq_wq;
- struct hinic_wq *rq_wq;
-
- struct hinic_qp *qps;
- u16 max_qps;
-
- u16 sq_depth;
- u16 rq_depth;
-
- void __iomem **sq_db;
- void __iomem *db_base;
-
- void *ci_addr_base;
- dma_addr_t ci_dma_base;
-
- struct hinic_free_db_area free_db_area;
-
- void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES];
-
- struct hinic_cmdqs cmdqs;
-
- u16 max_vfs;
- struct vf_data_storage *vf_infos;
- u8 link_status;
- struct hinic_nic_cfg nic_cfg;
-};
-
-struct hinic_wq_page_size {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 ppf_idx;
- u8 page_size;
-
- u32 rsvd1;
-};
-
-int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
- u32 page_size);
-
-int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
- u16 base_qpn, int num_qps,
- struct msix_entry *sq_msix_entries,
- struct msix_entry *rq_msix_entries);
-
-void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io,
- int num_qps);
-
-int hinic_io_init(struct hinic_func_to_io *func_to_io,
- struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
- struct msix_entry *ceq_msix_entries);
-
-void hinic_io_free(struct hinic_func_to_io *func_to_io);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
deleted file mode 100644
index 33ac7814d3b3..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_MBOX_H_
-#define HINIC_MBOX_H_
-
-#define HINIC_MBOX_PF_SEND_ERR 0x1
-#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2
-#define HINIC_MBOX_VF_CMD_ERROR 0x3
-
-#define HINIC_MAX_FUNCTIONS 512
-
-#define HINIC_MAX_PF_FUNCS 16
-
-#define HINIC_MBOX_WQ_NAME "hinic_mbox"
-
-#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80
-#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100
-#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104
-#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108
-#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C
-
-#define MAX_FUNCTION_NUM 512
-
-struct vf_cmd_check_handle {
- u8 cmd;
- bool (*check_cmd)(struct hinic_hwdev *hwdev, u16 src_func_idx,
- void *buf_in, u16 in_size);
-};
-
-enum hinic_mbox_ack_type {
- MBOX_ACK,
- MBOX_NO_ACK,
-};
-
-struct mbox_msg_info {
- u8 msg_id;
- u8 status;
-};
-
-struct hinic_recv_mbox {
- struct completion recv_done;
- void *mbox;
- u8 cmd;
- enum hinic_mod_type mod;
- u16 mbox_len;
- void *buf_out;
- enum hinic_mbox_ack_type ack_type;
- struct mbox_msg_info msg_info;
- u8 seq_id;
- atomic_t msg_cnt;
-};
-
-struct hinic_send_mbox {
- struct completion send_done;
- u8 *data;
-
- u64 *wb_status;
- void *wb_vaddr;
- dma_addr_t wb_paddr;
-};
-
-typedef void (*hinic_vf_mbox_cb)(void *handle, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size);
-typedef int (*hinic_pf_mbox_cb)(void *handle, u16 vf_id, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size);
-
-enum mbox_event_state {
- EVENT_START = 0,
- EVENT_FAIL,
- EVENT_TIMEOUT,
- EVENT_END,
-};
-
-enum hinic_mbox_cb_state {
- HINIC_VF_MBOX_CB_REG = 0,
- HINIC_VF_MBOX_CB_RUNNING,
- HINIC_PF_MBOX_CB_REG,
- HINIC_PF_MBOX_CB_RUNNING,
- HINIC_PPF_MBOX_CB_REG,
- HINIC_PPF_MBOX_CB_RUNNING,
- HINIC_PPF_TO_PF_MBOX_CB_REG,
- HINIC_PPF_TO_PF_MBOX_CB_RUNNIG,
-};
-
-struct hinic_mbox_func_to_func {
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
-
- struct semaphore mbox_send_sem;
- struct semaphore msg_send_sem;
- struct hinic_send_mbox send_mbox;
-
- struct workqueue_struct *workq;
-
- struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS];
- struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS];
-
- hinic_vf_mbox_cb vf_mbox_cb[HINIC_MOD_MAX];
- hinic_pf_mbox_cb pf_mbox_cb[HINIC_MOD_MAX];
- unsigned long pf_mbox_cb_state[HINIC_MOD_MAX];
- unsigned long vf_mbox_cb_state[HINIC_MOD_MAX];
-
- u8 send_msg_id;
- enum mbox_event_state event_flag;
-
- /* lock for mbox event flag */
- spinlock_t mbox_lock;
-
- u32 vf_mbx_old_rand_id[MAX_FUNCTION_NUM];
- u32 vf_mbx_rand_id[MAX_FUNCTION_NUM];
- bool support_vf_random;
-};
-
-struct hinic_mbox_work {
- struct work_struct work;
- u16 src_func_idx;
- struct hinic_mbox_func_to_func *func_to_func;
- struct hinic_recv_mbox *recv_mbox;
-};
-
-struct vf_cmd_msg_handle {
- u8 cmd;
- int (*cmd_msg_handler)(void *hwdev, u16 vf_id,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size);
-};
-
-bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
- void *buf_in, u16 in_size);
-
-bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
- struct vf_cmd_check_handle *cmd_handle,
- u16 vf_id, u8 cmd, void *buf_in,
- u16 in_size, u8 size);
-
-int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
- enum hinic_mod_type mod,
- hinic_pf_mbox_cb callback);
-
-int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
- enum hinic_mod_type mod,
- hinic_vf_mbox_cb callback);
-
-void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
- enum hinic_mod_type mod);
-
-void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
- enum hinic_mod_type mod);
-
-int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
-
-void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
-
-int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
- u8 cmd, void *buf_in, u16 in_size, void *buf_out,
- u16 *out_size, u32 timeout);
-
-int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
- enum hinic_mod_type mod, u16 cmd, u16 dst_func,
- void *buf_in, u16 in_size, void *buf_out,
- u16 *out_size, u32 timeout);
-
-int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
- enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
-
-int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
deleted file mode 100644
index 4aa1f433ed24..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ /dev/null
@@ -1,681 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/semaphore.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <net/devlink.h>
-#include <asm/barrier.h>
-
-#include "hinic_devlink.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_api_cmd.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_dev.h"
-
-#define SYNC_MSG_ID_MASK 0x1FF
-
-#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
-
-#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
- ((SYNC_MSG_ID(pf_to_mgmt) + 1) & \
- SYNC_MSG_ID_MASK))
-
-#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_LEN)
-
-#define MGMT_MSG_LEN_MIN 20
-#define MGMT_MSG_LEN_STEP 16
-#define MGMT_MSG_RSVD_FOR_DEV 8
-
-#define SEGMENT_LEN 48
-
-#define MAX_PF_MGMT_BUF_SIZE 2048
-
-/* Data should be SEG LEN size aligned */
-#define MAX_MSG_LEN 2016
-
-#define MSG_NOT_RESP 0xFFFF
-
-#define MGMT_MSG_TIMEOUT 5000
-
-#define SET_FUNC_PORT_MBOX_TIMEOUT 30000
-
-#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
-
-#define UPDATE_FW_MGMT_TIMEOUT 20000
-
-#define mgmt_to_pfhwdev(pf_mgmt) \
- container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
-
-enum msg_segment_type {
- NOT_LAST_SEGMENT = 0,
- LAST_SEGMENT = 1,
-};
-
-enum mgmt_direction_type {
- MGMT_DIRECT_SEND = 0,
- MGMT_RESP = 1,
-};
-
-enum msg_ack_type {
- MSG_ACK = 0,
- MSG_NO_ACK = 1,
-};
-
-/**
- * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that this handler will handle its messages
- * @handle: private data for the callback
- * @callback: the handler that will handle messages
- **/
-void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod,
- void *handle,
- void (*callback)(void *handle,
- u8 cmd, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size))
-{
- struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];
-
- mgmt_cb->cb = callback;
- mgmt_cb->handle = handle;
- mgmt_cb->state = HINIC_MGMT_CB_ENABLED;
-}
-
-/**
- * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that this handler handles its messages
- **/
-void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod)
-{
- struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];
-
- mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED;
-
- while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING)
- schedule();
-
- mgmt_cb->cb = NULL;
-}
-
-/**
- * prepare_header - prepare the header of the message
- * @pf_to_mgmt: PF to MGMT channel
- * @msg_len: the length of the message
- * @mod: module in the chip that will get the message
- * @ack_type: ask for response
- * @direction: the direction of the message
- * @cmd: command of the message
- * @msg_id: message id
- *
- * Return the prepared header value
- **/
-static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt,
- u16 msg_len, enum hinic_mod_type mod,
- enum msg_ack_type ack_type,
- enum mgmt_direction_type direction,
- u16 cmd, u16 msg_id)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
-
- return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
- HINIC_MSG_HEADER_SET(mod, MODULE) |
- HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) |
- HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
- HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
- HINIC_MSG_HEADER_SET(0, SEQID) |
- HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
- HINIC_MSG_HEADER_SET(direction, DIRECTION) |
- HINIC_MSG_HEADER_SET(cmd, CMD) |
- HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) |
- HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) |
- HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
-}
-
-/**
- * prepare_mgmt_cmd - prepare the mgmt command
- * @mgmt_cmd: pointer to the command to prepare
- * @header: pointer of the header for the message
- * @msg: the data of the message
- * @msg_len: the length of the message
- **/
-static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len)
-{
- memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
-
- mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
- memcpy(mgmt_cmd, header, sizeof(*header));
-
- mgmt_cmd += sizeof(*header);
- memcpy(mgmt_cmd, msg, msg_len);
-}
-
-/**
- * mgmt_msg_len - calculate the total message length
- * @msg_data_len: the length of the message data
- *
- * Return the total message length
- **/
-static u16 mgmt_msg_len(u16 msg_data_len)
-{
- /* RSVD + HEADER_SIZE + DATA_LEN */
- u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len;
-
- if (msg_len > MGMT_MSG_LEN_MIN)
- msg_len = MGMT_MSG_LEN_MIN +
- ALIGN((msg_len - MGMT_MSG_LEN_MIN),
- MGMT_MSG_LEN_STEP);
- else
- msg_len = MGMT_MSG_LEN_MIN;
-
- return msg_len;
-}
-
-/**
- * send_msg_to_mgmt - send message to mgmt by API CMD
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @data: the msg data
- * @data_len: the msg data length
- * @ack_type: ask for response
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to response for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *data, u16 data_len,
- enum msg_ack_type ack_type,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- struct hinic_api_cmd_chain *chain;
- u64 header;
- u16 msg_id;
-
- msg_id = SYNC_MSG_ID(pf_to_mgmt);
-
- if (direction == MGMT_RESP) {
- header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
- direction, cmd, resp_msg_id);
- } else {
- SYNC_MSG_ID_INC(pf_to_mgmt);
- header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
- direction, cmd, msg_id);
- }
-
- prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len);
-
- chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU];
- return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT,
- pf_to_mgmt->sync_msg_buf,
- mgmt_msg_len(data_len));
-}
-
-/**
- * msg_to_mgmt_sync - send sync message to mgmt
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @buf_out: response
- * @out_size: response length
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to response for
- * @timeout: time-out period of waiting for response
- *
- * Return 0 - Success, negative - Failure
- **/
-static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *buf_in, u16 in_size,
- u8 *buf_out, u16 *out_size,
- enum mgmt_direction_type direction,
- u16 resp_msg_id, u32 timeout)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_recv_msg *recv_msg;
- struct completion *recv_done;
- unsigned long timeo;
- u16 msg_id;
- int err;
-
- /* Lock the sync_msg_buf */
- down(&pf_to_mgmt->sync_msg_lock);
-
- recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
- recv_done = &recv_msg->recv_done;
-
- if (resp_msg_id == MSG_NOT_RESP)
- msg_id = SYNC_MSG_ID(pf_to_mgmt);
- else
- msg_id = resp_msg_id;
-
- init_completion(recv_done);
-
- err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
- MSG_ACK, direction, resp_msg_id);
- if (err) {
- dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n");
- goto unlock_sync_msg;
- }
-
- timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
-
- if (!wait_for_completion_timeout(recv_done, timeo)) {
- dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
- hinic_dump_aeq_info(pf_to_mgmt->hwdev);
- err = -ETIMEDOUT;
- goto unlock_sync_msg;
- }
-
- smp_rmb(); /* verify reading after completion */
-
- if (recv_msg->msg_id != msg_id) {
- dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id);
- err = -EFAULT;
- goto unlock_sync_msg;
- }
-
- if (buf_out && recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE) {
- memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
- *out_size = recv_msg->msg_len;
- }
-
-unlock_sync_msg:
- up(&pf_to_mgmt->sync_msg_lock);
- return err;
-}
-
-/**
- * msg_to_mgmt_async - send message to mgmt without response
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to response for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *buf_in, u16 in_size,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- int err;
-
- /* Lock the sync_msg_buf */
- down(&pf_to_mgmt->sync_msg_lock);
-
- err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
- MSG_NO_ACK, direction, resp_msg_id);
-
- up(&pf_to_mgmt->sync_msg_lock);
- return err;
-}
-
-/**
- * hinic_msg_to_mgmt - send message to mgmt
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @buf_out: response
- * @out_size: returned response length
- * @sync: sync msg or async msg
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
- enum hinic_mgmt_msg_type sync)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u32 timeout = 0;
-
- if (sync != HINIC_MGMT_MSG_SYNC) {
- dev_err(&pdev->dev, "Invalid MGMT msg type\n");
- return -EINVAL;
- }
-
- if (!MSG_SZ_IS_VALID(in_size)) {
- dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n");
- return -EINVAL;
- }
-
- if (HINIC_IS_VF(hwif)) {
- if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
- timeout = SET_FUNC_PORT_MBOX_TIMEOUT;
-
- return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
- in_size, buf_out, out_size, timeout);
- } else {
- if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
- timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
- else if (cmd == HINIC_PORT_CMD_UPDATE_FW)
- timeout = UPDATE_FW_MGMT_TIMEOUT;
-
- return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
- buf_out, out_size, MGMT_DIRECT_SEND,
- MSG_NOT_RESP, timeout);
- }
-}
-
-static void recv_mgmt_msg_work_handler(struct work_struct *work)
-{
- struct hinic_mgmt_msg_handle_work *mgmt_work =
- container_of(work, struct hinic_mgmt_msg_handle_work, work);
- struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt;
- struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
- u8 *buf_out = pf_to_mgmt->mgmt_ack_buf;
- struct hinic_mgmt_cb *mgmt_cb;
- unsigned long cb_state;
- u16 out_size = 0;
-
- memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
-
- if (mgmt_work->mod >= HINIC_MOD_MAX) {
- dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
- mgmt_work->mod);
- kfree(mgmt_work->msg);
- kfree(mgmt_work);
- return;
- }
-
- mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod];
-
- cb_state = cmpxchg(&mgmt_cb->state,
- HINIC_MGMT_CB_ENABLED,
- HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
-
- if (cb_state == HINIC_MGMT_CB_ENABLED && mgmt_cb->cb)
- mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
- mgmt_work->msg, mgmt_work->msg_len,
- buf_out, &out_size);
- else
- dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
- mgmt_work->mod, mgmt_work->cmd);
-
- mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
-
- if (!mgmt_work->async_mgmt_to_pf)
- /* MGMT sent sync msg, send the response */
- msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd,
- buf_out, out_size, MGMT_RESP,
- mgmt_work->msg_id);
-
- kfree(mgmt_work->msg);
- kfree(mgmt_work);
-}
-
-/**
- * mgmt_recv_msg_handler - handler for message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- struct hinic_mgmt_msg_handle_work *mgmt_work = NULL;
-
- mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
- if (!mgmt_work)
- return;
-
- if (recv_msg->msg_len) {
- mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
- if (!mgmt_work->msg) {
- kfree(mgmt_work);
- return;
- }
- }
-
- mgmt_work->pf_to_mgmt = pf_to_mgmt;
- mgmt_work->msg_len = recv_msg->msg_len;
- memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
- mgmt_work->msg_id = recv_msg->msg_id;
- mgmt_work->mod = recv_msg->mod;
- mgmt_work->cmd = recv_msg->cmd;
- mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
-
- INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
- queue_work(pf_to_mgmt->workq, &mgmt_work->work);
-}
-
-/**
- * mgmt_resp_msg_handler - handler for a response message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- wmb(); /* verify writing all, before reading */
-
- complete(&recv_msg->recv_done);
-}
-
-/**
- * recv_mgmt_msg_handler - handler for a message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @header: the header of the message
- * @recv_msg: received message details
- **/
-static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- u64 *header, struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int seq_id, seg_len;
- u8 *msg_body;
-
- seq_id = HINIC_MSG_HEADER_GET(*header, SEQID);
- seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN);
-
- if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) {
- dev_err(&pdev->dev, "recv big mgmt msg\n");
- return;
- }
-
- msg_body = (u8 *)header + sizeof(*header);
- memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len);
-
- if (!HINIC_MSG_HEADER_GET(*header, LAST))
- return;
-
- recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD);
- recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE);
- recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header,
- ASYNC_MGMT_TO_PF);
- recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN);
- recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID);
-
- if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP)
- mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
- else
- mgmt_recv_msg_handler(pf_to_mgmt, recv_msg);
-}
-
-/**
- * mgmt_msg_aeqe_handler - handler for a mgmt message event
- * @handle: PF to MGMT channel
- * @data: the header of the message
- * @size: unused
- **/
-static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size)
-{
- struct hinic_pf_to_mgmt *pf_to_mgmt = handle;
- struct hinic_recv_msg *recv_msg;
- u64 *header = (u64 *)data;
-
- recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) ==
- MGMT_DIRECT_SEND ?
- &pf_to_mgmt->recv_msg_from_mgmt :
- &pf_to_mgmt->recv_resp_msg_from_mgmt;
-
- recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
-}
-
-/**
- * alloc_recv_msg - allocate receive message memory
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: pointer that will hold the allocated data
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!recv_msg->msg)
- return -ENOMEM;
-
- recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!recv_msg->buf_out)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = alloc_recv_msg(pf_to_mgmt,
- &pf_to_mgmt->recv_msg_from_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate recv msg\n");
- return err;
- }
-
- err = alloc_recv_msg(pf_to_mgmt,
- &pf_to_mgmt->recv_resp_msg_from_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate resp recv msg\n");
- return err;
- }
-
- pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev,
- MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!pf_to_mgmt->sync_msg_buf)
- return -ENOMEM;
-
- pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev,
- MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!pf_to_mgmt->mgmt_ack_buf)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- * @hwif: HW interface the PF to MGMT will use for accessing HW
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_hwif *hwif)
-{
- struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- pf_to_mgmt->hwif = hwif;
- pf_to_mgmt->hwdev = hwdev;
-
- if (HINIC_IS_VF(hwif))
- return 0;
-
- err = hinic_health_reporters_create(hwdev->devlink_dev);
- if (err)
- return err;
-
- sema_init(&pf_to_mgmt->sync_msg_lock, 1);
- pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt");
- if (!pf_to_mgmt->workq) {
- dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n");
- hinic_health_reporters_destroy(hwdev->devlink_dev);
- return -ENOMEM;
- }
- pf_to_mgmt->sync_msg_id = 0;
-
- err = alloc_msg_buf(pf_to_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
- destroy_workqueue(pf_to_mgmt->workq);
- hinic_health_reporters_destroy(hwdev->devlink_dev);
- return err;
- }
-
- err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
- destroy_workqueue(pf_to_mgmt->workq);
- hinic_health_reporters_destroy(hwdev->devlink_dev);
- return err;
- }
-
- hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU,
- pf_to_mgmt,
- mgmt_msg_aeqe_handler);
- return 0;
-}
-
-/**
- * hinic_pf_to_mgmt_free - free PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- **/
-void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
-{
- struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
-
- if (HINIC_IS_VF(hwdev->hwif))
- return;
-
- hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
- hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
- destroy_workqueue(pf_to_mgmt->workq);
- hinic_health_reporters_destroy(hwdev->devlink_dev);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
index 4ca81cc838db..d9bea9c93951 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
@@ -1,181 +1,569 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
*/
-#ifndef HINIC_HW_MGMT_H
-#define HINIC_HW_MGMT_H
+#ifndef HINIC_HW_MGMT_H_
+#define HINIC_HW_MGMT_H_
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include <linux/completion.h>
-#include <linux/bitops.h>
+/* show each drivers only such as nic_service_cap,
+ * toe_service_cap structure, but not show service_cap
+ */
+enum hinic_service_type {
+ SERVICE_T_NIC = 0,
+ SERVICE_T_OVS,
+ SERVICE_T_ROCE,
+ SERVICE_T_TOE,
+ SERVICE_T_IWARP,
+ SERVICE_T_FC,
+ SERVICE_T_FCOE,
+ SERVICE_T_MIGRATE,
+ SERVICE_T_PT,
+ SERVICE_T_HWPT,
+ SERVICE_T_MAX,
+
+ /* Only used for interruption resource management,
+ * mark the request module
+ */
+ SERVICE_T_INTF = (1 << 15),
+ SERVICE_T_CQM = (1 << 16),
+};
+
+/* NIC service capability
+ * 1, The chip supports NIC RQ is 1K
+ * 2, PF/VF RQ specifications:
+ * disable RSS:
+ * disable VMDq: Each PF/VF at most 8 RQ
+ * enable the VMDq: Each PF/VF at most 1K RQ
+ * enable the RSS:
+ * disable VMDq: each PF at most 64 RQ, VF at most 32 RQ
+ * enable the VMDq: Each PF/VF at most 1K RQ
+ *
+ * 3, The chip supports NIC SQ is 1K
+ * 4, PF/VF SQ specifications:
+ * disable RSS:
+ * disable VMDq: Each PF/VF at most 8 SQ
+ * enable the VMDq: Each PF/VF at most 1K SQ
+ * enable the RSS:
+ * disable VMDq: each PF at most 64 SQ, VF at most 32 SQ
+ * enable the VMDq: Each PF/VF at most 1K SQ
+ */
+struct nic_service_cap {
+ /* PF resources */
+ u16 max_sqs;
+ u16 max_rqs;
+
+ /* VF resources, vf obtain through the MailBox mechanism from
+ * according PF
+ */
+ u16 vf_max_sqs;
+ u16 vf_max_rqs;
+ bool lro_en; /* LRO feature enable bit */
+ u8 lro_sz; /* LRO context space: n*16B */
+ u8 tso_sz; /* TSO context space: n*16B */
+
+ u16 max_queue_allowed;
+ u16 dynamic_qp; /* support dynamic queue */
+};
+
+struct dev_roce_svc_own_cap {
+ u32 max_qps;
+ u32 max_cqs;
+ u32 max_srqs;
+ u32 max_mpts;
+
+ u32 vf_max_qps;
+ u32 vf_max_cqs;
+ u32 vf_max_srqs;
+ u32 vf_max_mpts;
+
+ u32 cmtt_cl_start;
+ u32 cmtt_cl_end;
+ u32 cmtt_cl_sz;
+
+ u32 dmtt_cl_start;
+ u32 dmtt_cl_end;
+ u32 dmtt_cl_sz;
+
+ u32 wqe_cl_start;
+ u32 wqe_cl_end;
+ u32 wqe_cl_sz;
+
+ u32 qpc_entry_sz;
+ u32 max_wqes;
+ u32 max_rq_sg;
+ u32 max_sq_inline_data_sz;
+ u32 max_rq_desc_sz;
+
+ u32 rdmarc_entry_sz;
+ u32 max_qp_init_rdma;
+ u32 max_qp_dest_rdma;
+
+ u32 max_srq_wqes;
+ u32 reserved_srqs;
+ u32 max_srq_sge;
+ u32 srqc_entry_sz;
+
+ u32 max_msg_sz; /* Message size 2GB */
+
+ u8 num_cos;
+};
+
+struct dev_iwarp_svc_own_cap {
+ u32 max_qps;
+ u32 max_cqs;
+ u32 max_mpts;
+
+ u32 vf_max_qps;
+ u32 vf_max_cqs;
+ u32 vf_max_mpts;
+
+ u32 cmtt_cl_start;
+ u32 cmtt_cl_end;
+ u32 cmtt_cl_sz;
+
+ u32 dmtt_cl_start;
+ u32 dmtt_cl_end;
+ u32 dmtt_cl_sz;
+
+ u32 wqe_cl_start;
+ u32 wqe_cl_end;
+ u32 wqe_cl_sz;
+
+ u32 max_rq_sg;
+ u32 max_sq_inline_data_sz;
+ u32 max_rq_desc_sz;
+
+ u32 max_irq_depth;
+ u32 irq_entry_size; /* 64B */
+ u32 max_orq_depth;
+ u32 orq_entry_size; /* 32B */
+ u32 max_rtoq_depth;
+ u32 rtoq_entry_size; /* 32B */
+ u32 max_ackq_depth;
+ u32 ackq_entry_size; /* 16B */
+
+ u32 max_msg_sz; /* Message size 1GB */
+
+ u32 max_wqes; /* 8K */
+ u32 qpc_entry_sz; /* 1K */
-#include "hinic_hw_if.h"
-#include "hinic_hw_api_cmd.h"
+ /* true:CQM uses static allocation;
+ * false:CQM uses dynamic allocation.
+ * Currently, only consider the QPC
+ */
+ bool alloc_flag;
-#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0
-#define HINIC_MSG_HEADER_MODULE_SHIFT 11
-#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16
-#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22
-#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23
-#define HINIC_MSG_HEADER_SEQID_SHIFT 24
-#define HINIC_MSG_HEADER_LAST_SHIFT 30
-#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31
-#define HINIC_MSG_HEADER_CMD_SHIFT 32
-#define HINIC_MSG_HEADER_ZEROS_SHIFT 40
-#define HINIC_MSG_HEADER_PCI_INTF_SHIFT 48
-#define HINIC_MSG_HEADER_PF_IDX_SHIFT 50
-#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54
+ u8 num_cos;
+};
-#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF
-#define HINIC_MSG_HEADER_MODULE_MASK 0x1F
-#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F
-#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1
-#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1
-#define HINIC_MSG_HEADER_SEQID_MASK 0x3F
-#define HINIC_MSG_HEADER_LAST_MASK 0x1
-#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1
-#define HINIC_MSG_HEADER_CMD_MASK 0xFF
-#define HINIC_MSG_HEADER_ZEROS_MASK 0xFF
-#define HINIC_MSG_HEADER_PCI_INTF_MASK 0x3
-#define HINIC_MSG_HEADER_PF_IDX_MASK 0xF
-#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF
+/* RDMA service capability structure */
+struct dev_rdma_svc_cap {
+ /* ROCE service unique parameter structure */
+ struct dev_roce_svc_own_cap roce_own_cap;
+ /* IWARP service unique parameter structure */
+ struct dev_iwarp_svc_own_cap iwarp_own_cap;
+};
-#define HINIC_MSG_HEADER_SET(val, member) \
- ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \
- HINIC_MSG_HEADER_##member##_SHIFT)
+/* Defines the RDMA service capability flag */
+enum {
+ RDMA_BMME_FLAG_LOCAL_INV = (1 << 0),
+ RDMA_BMME_FLAG_REMOTE_INV = (1 << 1),
+ RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2),
+ RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3),
+ RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4),
+ RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5),
+
+ RDMA_DEV_CAP_FLAG_XRC = (1 << 6),
+ RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7),
+ RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8),
+ RDMA_DEV_CAP_FLAG_APM = (1 << 9),
+};
-#define HINIC_MSG_HEADER_GET(val, member) \
- (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \
- HINIC_MSG_HEADER_##member##_MASK)
+/* RDMA services */
+struct rdma_service_cap {
+ struct dev_rdma_svc_cap dev_rdma_cap;
+
+ u8 log_mtt; /* 1. the number of MTT PA must be integer power of 2
+ * 2. represented by logarithm. Each MTT table can
+ * contain 1, 2, 4, 8, and 16 PA)
+ */
+ u8 log_rdmarc; /* 1. the number of RDMArc PA must be integer power of 2
+ * 2. represented by logarithm. Each MTT table can
+ * contain 1, 2, 4, 8, and 16 PA)
+ */
+
+ u32 reserved_qps; /* Number of reserved QP */
+ u32 max_sq_sg; /* Maximum SGE number of SQ (8) */
+ u32 max_sq_desc_sz; /* WQE maximum size of SQ(1024B), inline maximum
+ * size if 960B(944B aligned to the 960B),
+ * 960B=>wqebb alignment=>1024B
+ */
+ u32 wqebb_size; /* Currently, the supports 64B and 128B,
+ * defined as 64Bytes
+ */
+
+ u32 max_cqes; /* Size of the depth of the CQ (64K-1) */
+ u32 reserved_cqs; /* Number of reserved CQ */
+ u32 cqc_entry_sz; /* Size of the CQC (64B/128B) */
+ u32 cqe_size; /* Size of CQE (32B) */
+
+ u32 reserved_mrws; /* Number of reserved MR/MR Window */
+ u32 mpt_entry_sz; /* MPT table size (64B) */
+ u32 max_fmr_maps; /* max MAP of FMR,
+ * (1 << (32-ilog2(num_mpt)))-1;
+ */
+
+ u32 num_mtts; /* Number of MTT table (4M),
+ * is actually MTT seg number
+ */
+ /* MTT table number of Each MTT seg(3) */
+ u32 log_mtt_seg;
+ u32 mtt_entry_sz; /* MTT table size 8B, including 1 PA(64bits) */
+ u32 log_rdmarc_seg; /* table number of each RDMArc seg(3) */
+
+ /* Timeout time. Formula:Tr=4.096us*2(local_ca_ack_delay), [Tr,4Tr] */
+ u32 local_ca_ack_delay;
+ u32 num_ports; /* Physical port number */
+
+ u32 db_page_size; /* Size of the DB (4KB) */
+ u32 direct_wqe_size; /* Size of the DWQE (256B) */
+
+ u32 num_pds; /* Maximum number of PD (128K) */
+ u32 reserved_pds; /* Number of reserved PD */
+ u32 max_xrcds; /* Maximum number of xrcd (64K) */
+ u32 reserved_xrcds; /* Number of reserved xrcd */
+
+ u32 max_gid_per_port; /* gid number (16) of each port */
+ u32 gid_entry_sz; /* RoCE v2 GID table is 32B,
+ * compatible RoCE v1 expansion
+ */
+
+ u32 reserved_lkey; /* local_dma_lkey */
+ u32 num_comp_vectors; /* Number of complete vector (32) */
+ u32 page_size_cap; /* Supports 4K,8K,64K,256K,1M,4M page_size */
+
+ u32 flags; /* RDMA some identity */
+ u32 max_frpl_len; /* Maximum number of pages frmr registration */
+ u32 max_pkeys; /* Number of supported pkey group */
+};
-enum hinic_mgmt_msg_type {
- HINIC_MGMT_MSG_SYNC = 1,
+/* PF/VF FCoE service resource structure defined */
+struct dev_fcoe_svc_cap {
+ /* PF resources */
+ u32 max_qps;
+ u32 max_cqs;
+ u32 max_srqs;
+
+ /* Child Context(Task IO)
+ * For FCoE/IOE services, at most 8K
+ */
+ u32 max_cctxs;
+ u32 cctxs_id_start;
+
+ u8 vp_id_start;
+ u8 vp_id_end;
};
-enum hinic_cfg_cmd {
- HINIC_CFG_NIC_CAP = 0,
+/* FCoE services */
+struct fcoe_service_cap {
+ struct dev_fcoe_svc_cap dev_fcoe_cap;
+
+ /* SQ */
+ u32 qpc_basic_size;
+ u32 childc_basic_size;
+ u32 sqe_size;
+
+ /* SCQ */
+ u32 scqc_basic_size;
+ u32 scqe_size;
+
+ /* SRQ */
+ u32 srqc_size;
+ u32 srqe_size;
};
-enum hinic_comm_cmd {
- HINIC_COMM_CMD_START_FLR = 0x1,
- HINIC_COMM_CMD_IO_STATUS_GET = 0x3,
- HINIC_COMM_CMD_DMA_ATTR_SET = 0x4,
+/* PF/VF ToE service resource structure */
+struct dev_toe_svc_cap {
+ /* PF resources */
+ u32 max_pctxs; /* Parent Context: max specifications 1M */
+ u32 max_cqs;
+ u32 max_srqs;
+ u32 srq_id_start;
- HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10,
- HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11,
+ u8 num_cos;
+};
+
+/* ToE services */
+struct toe_service_cap {
+ struct dev_toe_svc_cap dev_toe_cap;
- HINIC_COMM_CMD_HWCTXT_SET = 0x12,
- HINIC_COMM_CMD_HWCTXT_GET = 0x13,
+ bool alloc_flag;
+ u32 pctx_sz;/* 1KB */
+ u32 scqc_sz;/* 64B */
+};
- HINIC_COMM_CMD_SQ_HI_CI_SET = 0x14,
+/* PF FC service resource structure defined */
+struct dev_fc_svc_cap {
+ /* PF Parent QPC */
+ u32 max_parent_qpc_num; /* max number is 2048 */
- HINIC_COMM_CMD_RES_STATE_SET = 0x24,
+ /* PF Child QPC */
+ u32 max_child_qpc_num; /* max number is 2048 */
+ u32 child_qpc_id_start;
- HINIC_COMM_CMD_IO_RES_CLEAR = 0x29,
+ /* PF SCQ */
+ u32 scq_num; /* 16 */
- HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
+ /* PF supports SRQ */
+ u32 srq_num; /* Number of SRQ is 2 */
- HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP,
- HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP,
+ u8 vp_id_start;
+ u8 vp_id_end;
+};
- HINIC_COMM_CMD_FAULT_REPORT = 0x37,
+/* FC services */
+struct fc_service_cap {
+ struct dev_fc_svc_cap dev_fc_cap;
- HINIC_COMM_CMD_SET_LED_STATUS = 0x4a,
+ /* Parent QPC */
+ u32 parent_qpc_size; /* 256B */
- HINIC_COMM_CMD_L2NIC_RESET = 0x4b,
+ /* Child QPC */
+ u32 child_qpc_size; /* 256B */
- HINIC_COMM_CMD_PAGESIZE_SET = 0x50,
+ /* SQ */
+ u32 sqe_size; /* 128B(in linked list mode) */
- HINIC_COMM_CMD_GET_BOARD_INFO = 0x52,
+ /* SCQ */
+ u32 scqc_size; /* Size of the Context 32B */
+ u32 scqe_size; /* 64B */
- HINIC_COMM_CMD_WATCHDOG_INFO = 0x56,
+ /* SRQ */
+ u32 srqc_size; /* Size of SRQ Context (64B) */
+ u32 srqe_size; /* 32B */
+};
- HINIC_MGMT_CMD_SET_VF_RANDOM_ID = 0x61,
+/* PF OVS service resource structure defined */
+struct dev_ovs_svc_cap {
+ /* PF resources */
+ u32 max_pctxs; /* Parent Context: max specifications 1M */
+ u32 max_cqs;
+ u8 dynamic_qp_en;
- HINIC_COMM_CMD_MAX,
+ /* VF resources */
+ u32 vf_max_pctxs; /* Parent Context: max specifications 1M */
+ u32 vf_max_cqs;
};
-enum hinic_mgmt_cb_state {
- HINIC_MGMT_CB_ENABLED = BIT(0),
- HINIC_MGMT_CB_RUNNING = BIT(1),
+/* OVS services */
+struct ovs_service_cap {
+ struct dev_ovs_svc_cap dev_ovs_cap;
+
+ bool alloc_flag;
+ u32 pctx_sz; /* 512B */
+ u32 scqc_sz; /* 64B */
};
-struct hinic_recv_msg {
- u8 *msg;
- u8 *buf_out;
+/* PF ACL service resource structure */
+struct dev_acl_svc_cap {
+ /* PF resources */
+ u32 max_pctxs; /* Parent Context: max specifications 1M */
+ u32 max_cqs;
- struct completion recv_done;
+ /* VF resources */
+ u32 vf_max_pctxs; /* Parent Context: max specifications 1M */
+ u32 vf_max_cqs;
+};
- u16 cmd;
- enum hinic_mod_type mod;
- int async_mgmt_to_pf;
+/* ACL services */
+struct acl_service_cap {
+ struct dev_acl_svc_cap dev_acl_cap;
- u16 msg_len;
- u16 msg_id;
+ bool alloc_flag;
+ u32 pctx_sz; /* 512B */
+ u32 scqc_sz; /* 64B */
};
-struct hinic_mgmt_cb {
- void (*cb)(void *handle, u8 cmd,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size);
+enum hinic_chip_mode {
+ CHIP_MODE_NORMAL,
+ CHIP_MODE_BMGW,
+ CHIP_MODE_VMGW,
+};
- void *handle;
- unsigned long state;
+bool hinic_support_nic(void *hwdev, struct nic_service_cap *cap);
+bool hinic_support_roce(void *hwdev, struct rdma_service_cap *cap);
+bool hinic_support_fcoe(void *hwdev, struct fcoe_service_cap *cap);
+/* PPF support,PF not support */
+bool hinic_support_toe(void *hwdev, struct toe_service_cap *cap);
+bool hinic_support_iwarp(void *hwdev, struct rdma_service_cap *cap);
+bool hinic_support_fc(void *hwdev, struct fc_service_cap *cap);
+bool hinic_support_fic(void *hwdev);
+bool hinic_support_ovs(void *hwdev, struct ovs_service_cap *cap);
+bool hinic_support_acl(void *hwdev, struct acl_service_cap *cap);
+bool hinic_support_rdma(void *hwdev, struct rdma_service_cap *cap);
+bool hinic_support_ft(void *hwdev);
+bool hinic_func_for_mgmt(void *hwdev);
+bool hinic_support_dynamic_q(void *hwdev);
+
+int hinic_set_toe_enable(void *hwdev, bool enable);
+bool hinic_get_toe_enable(void *hwdev);
+int hinic_set_fcoe_enable(void *hwdev, bool enable);
+bool hinic_get_fcoe_enable(void *hwdev);
+bool hinic_get_stateful_enable(void *hwdev);
+
+/* Service interface for obtaining service_cap public fields */
+/* Obtain service_cap.host_oq_id_mask_val */
+u8 hinic_host_oq_id_mask(void *hwdev);
+u8 hinic_host_id(void *hwdev);/* Obtain service_cap.host_id */
+/* Obtain service_cap.host_total_function */
+u16 hinic_host_total_func(void *hwdev);
+/* Obtain service_cap.nic_cap.dev_nic_cap.max_sqs */
+u16 hinic_func_max_nic_qnum(void *hwdev);
+/* Obtain service_cap.dev_cap.max_sqs */
+u16 hinic_func_max_qnum(void *hwdev);
+u8 hinic_ep_id(void *hwdev);/* Obtain service_cap.ep_id */
+u8 hinic_er_id(void *hwdev);/* Obtain service_cap.er_id */
+u8 hinic_physical_port_id(void *hwdev);/* Obtain service_cap.port_id */
+u8 hinic_func_max_vf(void *hwdev);/* Obtain service_cap.max_vf */
+u32 hinic_func_pf_num(void *hwdev);/* Obtain service_cap.pf_num */
+u8 hinic_max_num_cos(void *hwdev);
+u8 hinic_cos_valid_bitmap(void *hwdev);
+u8 hinic_net_port_mode(void *hwdev);/* Obtain service_cap.net_port_mode */
+
+/* The following information is obtained from the bar space
+ * which is recorded by SDK layer.
+ * Here provide parameter query interface for service
+ */
+/* func_attr.glb_func_idx, global function index */
+u16 hinic_global_func_id(void *hwdev);
+/* func_attr.intr_num, MSI-X table entry in function */
+u16 hinic_intr_num(void *hwdev);
+enum intr_type {
+ INTR_TYPE_MSIX,
+ INTR_TYPE_MSI,
+ INTR_TYPE_INT,
+ INTR_TYPE_NONE,
+	/* PXE and OVS require single-threaded processing;
+	 * synchronous messages must use the poll-wait mechanism interface
+	 */
};
-struct hinic_pf_to_mgmt {
- struct hinic_hwif *hwif;
- struct hinic_hwdev *hwdev;
- struct semaphore sync_msg_lock;
- u16 sync_msg_id;
- u8 *sync_msg_buf;
- void *mgmt_ack_buf;
+enum intr_type hinic_intr_type(void *hwdev);
- struct hinic_recv_msg recv_resp_msg_from_mgmt;
- struct hinic_recv_msg recv_msg_from_mgmt;
+u8 hinic_pf_id_of_vf(void *hwdev); /* func_attr.p2p_idx, belongs to which pf */
+u8 hinic_pcie_itf_id(void *hwdev); /* func_attr.itf_idx, pcie interface index */
+u8 hinic_vf_in_pf(void *hwdev); /* func_attr.vf_in_pf, the vf offset in pf */
+enum func_type {
+ TYPE_PF,
+ TYPE_VF,
+ TYPE_PPF,
+ TYPE_UNKNOWN,
+};
+
+/* func_attr.func_type, 0-PF 1-VF 2-PPF */
+enum func_type hinic_func_type(void *hwdev);
- struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX];
+u8 hinic_ceq_num(void *hwdev); /* func_attr.ceq_num, ceq num in one function */
+/* func_attr.dma_attr_entry_num, dma attribute entry num */
+u8 hinic_dma_attr_entry_num(void *hwdev);
+/* The PF func_attr.glb_pf_vf_offset,
+ * PF use only
+ */
+u16 hinic_glb_pf_vf_offset(void *hwdev);
+/* func_attr.mpf_idx, mpf global function index,
+ * This value is valid only when it is PF
+ */
+u8 hinic_mpf_idx(void *hwdev);
+u8 hinic_ppf_idx(void *hwdev);
- struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX];
+enum hinic_msix_state {
+ HINIC_MSIX_ENABLE,
+ HINIC_MSIX_DISABLE,
+};
- struct workqueue_struct *workq;
+void hinic_set_msix_state(void *hwdev, u16 msix_idx,
+ enum hinic_msix_state flag);
+enum hinic_msix_state hinic_get_msix_state(void *hwdev, u16 msix_idx);
+
+/* Define the version information structure */
+struct dev_version_info {
+	u8 up_ver;	/* uP version, read directly from the uP;
+			 * not taken from the configuration file
+			 */
+ u8 ucode_ver; /* The microcode version,
+ * read through the CMDq from microcode
+ */
+ u8 cfg_file_ver;/* uP configuration file version */
+ u8 sdk_ver; /* SDK driver version */
+ u8 hw_ver; /* Hardware version */
};
-struct hinic_mgmt_msg_handle_work {
- struct work_struct work;
- struct hinic_pf_to_mgmt *pf_to_mgmt;
+/* Obtain service_cap.dev_version_info */
+int hinic_dev_ver_info(void *hwdev, struct dev_version_info *ver);
+
+int hinic_vector_to_eqn(void *hwdev, enum hinic_service_type type, int vector);
- void *msg;
- u16 msg_len;
+/* Defines the IRQ information structure */
+struct irq_info {
+ u16 msix_entry_idx; /* IRQ corresponding index number */
+ u32 irq_id; /* the IRQ number from OS */
+};
- enum hinic_mod_type mod;
- u8 cmd;
- u16 msg_id;
- int async_mgmt_to_pf;
+int hinic_alloc_irqs(void *hwdev, enum hinic_service_type type, u16 req_num,
+ struct irq_info *irq_info_array, u16 *resp_num);
+void hinic_free_irq(void *hwdev, enum hinic_service_type type, u32 irq_id);
+int hinic_alloc_ceqs(void *hwdev, enum hinic_service_type type, int req_num,
+ int *ceq_id_array, int *resp_num);
+void hinic_free_ceq(void *hwdev, enum hinic_service_type type, int ceq_id);
+int hinic_sync_time(void *hwdev, u64 time);
+void hinic_sync_time_async(void *hwdev, u64 time);
+
+struct hinic_micro_log_info {
+ int (*init)(void *hwdev);
+ void (*deinit)(void *hwdev);
};
-void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod,
- void *handle,
- void (*callback)(void *handle,
- u8 cmd, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size));
+int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info);
+void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info);
-void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod);
+void hinic_disable_mgmt_msg_report(void *hwdev);
+void hinic_set_func_deinit_flag(void *hwdev);
+void hinic_flush_mgmt_workq(void *hwdev);
-int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
- enum hinic_mgmt_msg_type sync);
+enum func_nic_state {
+ HINIC_FUNC_NIC_DEL,
+ HINIC_FUNC_NIC_ADD,
+};
-int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_hwif *hwif);
+struct hinic_func_nic_state {
+ u8 state;
+ u8 rsvd0;
+ u16 func_idx;
-void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt);
+ u8 rsvd1[16];
+};
+int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state *state);
+int hinic_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en);
+bool hinic_get_master_host_mbox_enable(void *hwdev);
+bool hinic_get_slave_host_enable(void *hwdev, u8 host_id);
+int hinic_func_own_get(void *hwdev);
+void hinic_func_own_free(void *hwdev);
+int hinic_global_func_id_get(void *hwdev, u16 *func_id);
+u16 hinic_pf_id_of_vf_hw(void *hwdev);
+u16 hinic_global_func_id_hw(void *hwdev);
+bool hinic_func_for_pt(void *hwdev);
+bool hinic_func_for_hwpt(void *hwdev);
+u32 hinic_get_db_size(void *cfg_reg_base, enum hinic_chip_mode *chip_mode);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
deleted file mode 100644
index 537a8098bc4e..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ /dev/null
@@ -1,971 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-#include <linux/errno.h>
-#include <linux/sizes.h>
-#include <linux/atomic.h>
-#include <linux/skbuff.h>
-#include <linux/io.h>
-#include <asm/barrier.h>
-#include <asm/byteorder.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define SQ_DB_OFF SZ_2K
-
-/* The number of cache line to prefetch Until threshold state */
-#define WQ_PREFETCH_MAX 2
-/* The number of cache line to prefetch After threshold state */
-#define WQ_PREFETCH_MIN 1
-/* Threshold state */
-#define WQ_PREFETCH_THRESHOLD 256
-
-/* sizes of the SQ/RQ ctxt */
-#define Q_CTXT_SIZE 48
-#define CTXT_RSVD 240
-
-#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
- (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)
-
-#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
- (((max_rqs) + (max_sqs)) * CTXT_RSVD + \
- (max_sqs + (q_id)) * Q_CTXT_SIZE)
-
-#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4)
-#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3)
-#define SECT_SIZE_FROM_8BYTES(size) ((size) << 3)
-
-#define SQ_DB_PI_HI_SHIFT 8
-#define SQ_DB_PI_HI(prod_idx) ((prod_idx) >> SQ_DB_PI_HI_SHIFT)
-
-#define SQ_DB_PI_LOW_MASK 0xFF
-#define SQ_DB_PI_LOW(prod_idx) ((prod_idx) & SQ_DB_PI_LOW_MASK)
-
-#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))
-
-#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
-#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
-
-enum sq_wqe_type {
- SQ_NORMAL_WQE = 0,
-};
-
-enum rq_completion_fmt {
- RQ_COMPLETE_SGE = 1
-};
-
-void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
- enum hinic_qp_ctxt_type ctxt_type,
- u16 num_queues, u16 max_queues)
-{
- u16 max_sqs = max_queues;
- u16 max_rqs = max_queues;
-
- qp_ctxt_hdr->num_queues = num_queues;
- qp_ctxt_hdr->queue_type = ctxt_type;
-
- if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
- qp_ctxt_hdr->addr_offset = SQ_CTXT_OFFSET(max_sqs, max_rqs, 0);
- else
- qp_ctxt_hdr->addr_offset = RQ_CTXT_OFFSET(max_sqs, max_rqs, 0);
-
- qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);
-
- hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
-}
-
-void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
- struct hinic_sq *sq, u16 global_qid)
-{
- u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
- u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
- u16 pi_start, ci_start;
- struct hinic_wq *wq;
-
- wq = sq->wq;
- ci_start = atomic_read(&wq->cons_idx);
- pi_start = atomic_read(&wq->prod_idx);
-
- /* Read the first page paddr from the WQ page paddr ptrs */
- wq_page_addr = be64_to_cpu(*wq->block_vaddr);
-
- wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
- wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
- wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
-
- /* If only one page, use 0-level CLA */
- if (wq->num_q_pages == 1)
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq_page_addr);
- else
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
-
- wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
- wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
-
- sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid,
- GLOBAL_SQ_ID) |
- HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN);
-
- sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) |
- HINIC_SQ_CTXT_CI_SET(1, WRAPPED);
-
- sq_ctxt->wq_hi_pfn_pi =
- HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
- HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
-
- sq_ctxt->wq_lo_pfn = wq_page_pfn_lo;
-
- sq_ctxt->pref_cache =
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
-
- sq_ctxt->pref_wrapped = 1;
-
- sq_ctxt->pref_wq_hi_pfn_ci =
- HINIC_SQ_CTXT_PREF_SET(ci_start, CI) |
- HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN);
-
- sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;
-
- sq_ctxt->wq_block_hi_pfn =
- HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);
-
- sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;
-
- hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
-}
-
-void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
- struct hinic_rq *rq, u16 global_qid)
-{
- u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
- u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
- u16 pi_start, ci_start;
- struct hinic_wq *wq;
-
- wq = rq->wq;
- ci_start = atomic_read(&wq->cons_idx);
- pi_start = atomic_read(&wq->prod_idx);
-
- /* Read the first page paddr from the WQ page paddr ptrs */
- wq_page_addr = be64_to_cpu(*wq->block_vaddr);
-
- wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
- wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
- wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
-
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
- wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
- wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
-
- rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) |
- HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED);
-
- rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) |
- HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
-
- rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi,
- HI_PFN) |
- HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI);
-
- rq_ctxt->wq_lo_pfn = wq_page_pfn_lo;
-
- rq_ctxt->pref_cache =
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
-
- rq_ctxt->pref_wrapped = 1;
-
- rq_ctxt->pref_wq_hi_pfn_ci =
- HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) |
- HINIC_RQ_CTXT_PREF_SET(ci_start, CI);
-
- rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;
-
- rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
- rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
-
- rq_ctxt->wq_block_hi_pfn =
- HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);
-
- rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;
-
- hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
-}
-
-/**
- * alloc_sq_skb_arr - allocate sq array for saved skb
- * @sq: HW Send Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_sq_skb_arr(struct hinic_sq *sq)
-{
- struct hinic_wq *wq = sq->wq;
- size_t skb_arr_size;
-
- skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
- sq->saved_skb = vzalloc(skb_arr_size);
- if (!sq->saved_skb)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * free_sq_skb_arr - free sq array for saved skb
- * @sq: HW Send Queue
- **/
-static void free_sq_skb_arr(struct hinic_sq *sq)
-{
- vfree(sq->saved_skb);
-}
-
-/**
- * alloc_rq_skb_arr - allocate rq array for saved skb
- * @rq: HW Receive Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_rq_skb_arr(struct hinic_rq *rq)
-{
- struct hinic_wq *wq = rq->wq;
- size_t skb_arr_size;
-
- skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
- rq->saved_skb = vzalloc(skb_arr_size);
- if (!rq->saved_skb)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * free_rq_skb_arr - free rq array for saved skb
- * @rq: HW Receive Queue
- **/
-static void free_rq_skb_arr(struct hinic_rq *rq)
-{
- vfree(rq->saved_skb);
-}
-
-/**
- * hinic_init_sq - Initialize HW Send Queue
- * @sq: HW Send Queue
- * @hwif: HW Interface for accessing HW
- * @wq: Work Queue for the data of the SQ
- * @entry: msix entry for sq
- * @ci_addr: address for reading the current HW consumer index
- * @ci_dma_addr: dma address for reading the current HW consumer index
- * @db_base: doorbell base address
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry,
- void *ci_addr, dma_addr_t ci_dma_addr,
- void __iomem *db_base)
-{
- sq->hwif = hwif;
-
- sq->wq = wq;
-
- sq->irq = entry->vector;
- sq->msix_entry = entry->entry;
-
- sq->hw_ci_addr = ci_addr;
- sq->hw_ci_dma_addr = ci_dma_addr;
-
- sq->db_base = db_base + SQ_DB_OFF;
-
- return alloc_sq_skb_arr(sq);
-}
-
-/**
- * hinic_clean_sq - Clean HW Send Queue's Resources
- * @sq: Send Queue
- **/
-void hinic_clean_sq(struct hinic_sq *sq)
-{
- free_sq_skb_arr(sq);
-}
-
-/**
- * alloc_rq_cqe - allocate rq completion queue elements
- * @rq: HW Receive Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_rq_cqe(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t cqe_dma_size, cqe_size;
- struct hinic_wq *wq = rq->wq;
- int j, i;
-
- cqe_size = wq->q_depth * sizeof(*rq->cqe);
- rq->cqe = vzalloc(cqe_size);
- if (!rq->cqe)
- return -ENOMEM;
-
- cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
- rq->cqe_dma = vzalloc(cqe_dma_size);
- if (!rq->cqe_dma)
- goto err_cqe_dma_arr_alloc;
-
- for (i = 0; i < wq->q_depth; i++) {
- rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
- sizeof(*rq->cqe[i]),
- &rq->cqe_dma[i], GFP_KERNEL);
- if (!rq->cqe[i])
- goto err_cqe_alloc;
- }
-
- return 0;
-
-err_cqe_alloc:
- for (j = 0; j < i; j++)
- dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
- rq->cqe_dma[j]);
-
- vfree(rq->cqe_dma);
-
-err_cqe_dma_arr_alloc:
- vfree(rq->cqe);
- return -ENOMEM;
-}
-
-/**
- * free_rq_cqe - free rq completion queue elements
- * @rq: HW Receive Queue
- **/
-static void free_rq_cqe(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_wq *wq = rq->wq;
- int i;
-
- for (i = 0; i < wq->q_depth; i++)
- dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],
- rq->cqe_dma[i]);
-
- vfree(rq->cqe_dma);
- vfree(rq->cqe);
-}
-
-/**
- * hinic_init_rq - Initialize HW Receive Queue
- * @rq: HW Receive Queue
- * @hwif: HW Interface for accessing HW
- * @wq: Work Queue for the data of the RQ
- * @entry: msix entry for rq
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry)
-{
- struct pci_dev *pdev = hwif->pdev;
- size_t pi_size;
- int err;
-
- rq->hwif = hwif;
-
- rq->wq = wq;
-
- rq->irq = entry->vector;
- rq->msix_entry = entry->entry;
-
- rq->buf_sz = HINIC_RX_BUF_SZ;
-
- err = alloc_rq_skb_arr(rq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate rq priv data\n");
- return err;
- }
-
- err = alloc_rq_cqe(rq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate rq cqe\n");
- goto err_alloc_rq_cqe;
- }
-
- /* HW requirements: Must be at least 32 bit */
- pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
- &rq->pi_dma_addr, GFP_KERNEL);
- if (!rq->pi_virt_addr) {
- err = -ENOMEM;
- goto err_pi_virt;
- }
-
- return 0;
-
-err_pi_virt:
- free_rq_cqe(rq);
-
-err_alloc_rq_cqe:
- free_rq_skb_arr(rq);
- return err;
-}
-
-/**
- * hinic_clean_rq - Clean HW Receive Queue's Resources
- * @rq: HW Receive Queue
- **/
-void hinic_clean_rq(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t pi_size;
-
- pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr,
- rq->pi_dma_addr);
-
- free_rq_cqe(rq);
- free_rq_skb_arr(rq);
-}
-
-/**
- * hinic_get_sq_free_wqebbs - return number of free wqebbs for use
- * @sq: send queue
- *
- * Return number of free wqebbs
- **/
-int hinic_get_sq_free_wqebbs(struct hinic_sq *sq)
-{
- struct hinic_wq *wq = sq->wq;
-
- return atomic_read(&wq->delta) - 1;
-}
-
-/**
- * hinic_get_rq_free_wqebbs - return number of free wqebbs for use
- * @rq: recv queue
- *
- * Return number of free wqebbs
- **/
-int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
-{
- struct hinic_wq *wq = rq->wq;
-
- return atomic_read(&wq->delta) - 1;
-}
-
-static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, int nr_descs)
-{
- u32 ctrl_size, task_size, bufdesc_size;
-
- ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
- task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
- bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc);
- bufdesc_size = SIZE_8BYTES(bufdesc_size);
-
- ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
- HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) |
- HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
- HINIC_SQ_CTRL_SET(ctrl_size, LEN);
-
- ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT,
- QUEUE_INFO_MSS) |
- HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC);
-}
-
-static void sq_prepare_task(struct hinic_sq_task *task)
-{
- task->pkt_info0 = 0;
- task->pkt_info1 = 0;
- task->pkt_info2 = 0;
-
- task->ufo_v6_identify = 0;
-
- task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE);
-
- task->zero_pad = 0;
-}
-
-void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len)
-{
- task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN);
-}
-
-void hinic_task_set_outter_l3(struct hinic_sq_task *task,
- enum hinic_l3_offload_type l3_type,
- u32 network_len)
-{
- task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
- HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
-}
-
-void hinic_task_set_inner_l3(struct hinic_sq_task *task,
- enum hinic_l3_offload_type l3_type,
- u32 network_len)
-{
- task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
- task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
-}
-
-void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
- enum hinic_l4_tunnel_type l4_type,
- u32 tunnel_len)
-{
- task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
- HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
-}
-
-void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
- enum hinic_l4_offload_type l4_offload,
- u32 l4_len, u32 offset)
-{
- u32 tcp_udp_cs = 0, sctp = 0;
- u32 mss = HINIC_MSS_DEFAULT;
-
- if (l4_offload == TCP_OFFLOAD_ENABLE ||
- l4_offload == UDP_OFFLOAD_ENABLE)
- tcp_udp_cs = 1;
- else if (l4_offload == SCTP_OFFLOAD_ENABLE)
- sctp = 1;
-
- task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
- task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
-
- *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
- HINIC_SQ_CTRL_SET(tcp_udp_cs, QUEUE_INFO_TCPUDP_CS) |
- HINIC_SQ_CTRL_SET(sctp, QUEUE_INFO_SCTP);
-
- *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
- *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
-}
-
-void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
- enum hinic_l4_offload_type l4_offload,
- u32 l4_len, u32 offset, u32 ip_ident, u32 mss)
-{
- u32 tso = 0, ufo = 0;
-
- if (l4_offload == TCP_OFFLOAD_ENABLE)
- tso = 1;
- else if (l4_offload == UDP_OFFLOAD_ENABLE)
- ufo = 1;
-
- task->ufo_v6_identify = ip_ident;
-
- task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
- task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG);
- task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
-
- *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
- HINIC_SQ_CTRL_SET(tso, QUEUE_INFO_TSO) |
- HINIC_SQ_CTRL_SET(ufo, QUEUE_INFO_UFO) |
- HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS);
-
- /* set MSS value */
- *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
- *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
-}
-
-/**
- * hinic_sq_prepare_wqe - prepare wqe before insert to the queue
- * @sq: send queue
- * @sq_wqe: wqe to prepare
- * @sges: sges for use by the wqe for send for buf addresses
- * @nr_sges: number of sges
- **/
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe,
- struct hinic_sge *sges, int nr_sges)
-{
- int i;
-
- sq_prepare_ctrl(&sq_wqe->ctrl, nr_sges);
-
- sq_prepare_task(&sq_wqe->task);
-
- for (i = 0; i < nr_sges; i++)
- sq_wqe->buf_descs[i].sge = sges[i];
-}
-
-/**
- * sq_prepare_db - prepare doorbell to write
- * @sq: send queue
- * @prod_idx: pi value for the doorbell
- * @cos: cos of the doorbell
- *
- * Return db value
- **/
-static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos)
-{
- struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
- u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx));
-
- /* Data should be written to HW in Big Endian Format */
- return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) |
- HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) |
- HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) |
- HINIC_SQ_DB_INFO_SET(cos, COS) |
- HINIC_SQ_DB_INFO_SET(qp->q_id, QID));
-}
-
-/**
- * hinic_sq_write_db- write doorbell
- * @sq: send queue
- * @prod_idx: pi value for the doorbell
- * @wqe_size: wqe size
- * @cos: cos of the wqe
- **/
-void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
- unsigned int cos)
-{
- struct hinic_wq *wq = sq->wq;
-
- /* increment prod_idx to the next */
- prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
- prod_idx = SQ_MASKED_IDX(sq, prod_idx);
-
- wmb(); /* Write all before the doorbell */
-
- writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx));
-}
-
-/**
- * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi
- * @sq: sq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
- unsigned int wqe_size, u16 *prod_idx)
-{
- struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size,
- prod_idx);
-
- if (IS_ERR(hw_wqe))
- return NULL;
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_return_wqe - return the wqe to the sq
- * @sq: send queue
- * @wqe_size: the size of the wqe
- **/
-void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size)
-{
- hinic_return_wqe(sq->wq, wqe_size);
-}
-
-/**
- * hinic_sq_write_wqe - write the wqe to the sq
- * @sq: send queue
- * @prod_idx: pi of the wqe
- * @sq_wqe: the wqe to write
- * @skb: skb to save
- * @wqe_size: the size of the wqe
- **/
-void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe,
- struct sk_buff *skb, unsigned int wqe_size)
-{
- struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe;
-
- sq->saved_skb[prod_idx] = skb;
-
- /* The data in the HW should be in Big Endian Format */
- hinic_cpu_to_be32(sq_wqe, wqe_size);
-
- hinic_write_wqe(sq->wq, hw_wqe, wqe_size);
-}
-
-/**
- * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci, the
- * wqe only have one wqebb
- * @sq: send queue
- * @skb: return skb that was saved
- * @wqe_size: the wqe size ptr
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
- struct hinic_sq_wqe *sq_wqe;
- struct hinic_sq_ctrl *ctrl;
- unsigned int buf_sect_len;
- u32 ctrl_info;
-
- /* read the ctrl section for getting wqe size */
- hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx);
- if (IS_ERR(hw_wqe))
- return NULL;
-
- *skb = sq->saved_skb[*cons_idx];
-
- sq_wqe = &hw_wqe->sq_wqe;
- ctrl = &sq_wqe->ctrl;
- ctrl_info = be32_to_cpu(ctrl->ctrl_info);
- buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN);
-
- *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task);
- *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len);
- *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
- * @sq: send queue
- * @skb: return skb that was saved
- * @wqe_size: the size of the wqe
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int wqe_size, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
-
- hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
- *skb = sq->saved_skb[*cons_idx];
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_put_wqe - release the ci for new wqes
- * @sq: send queue
- * @wqe_size: the size of the wqe
- **/
-void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size)
-{
- hinic_put_wqe(sq->wq, wqe_size);
-}
-
-/**
- * hinic_sq_get_sges - get sges from the wqe
- * @sq_wqe: wqe to get the sges from its buffer addresses
- * @sges: returned sges
- * @nr_sges: number sges to return
- **/
-void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
-{
- int i;
-
- for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) {
- sges[i] = sq_wqe->buf_descs[i].sge;
- hinic_be32_to_cpu(&sges[i], sizeof(sges[i]));
- }
-}
-
-/**
- * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi
- * @rq: rq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
- unsigned int wqe_size, u16 *prod_idx)
-{
- struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
- prod_idx);
-
- if (IS_ERR(hw_wqe))
- return NULL;
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_write_wqe - write the wqe to the rq
- * @rq: recv queue
- * @prod_idx: pi of the wqe
- * @rq_wqe: the wqe to write
- * @skb: skb to save
- **/
-void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb)
-{
- struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe;
-
- rq->saved_skb[prod_idx] = skb;
-
- /* The data in the HW should be in Big Endian Format */
- hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe));
-
- hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe));
-}
-
-/**
- * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci
- * @rq: recv queue
- * @wqe_size: the size of the wqe
- * @skb: return saved skb
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
- struct hinic_rq_cqe *cqe;
- int rx_done;
- u32 status;
-
- hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
- if (IS_ERR(hw_wqe))
- return NULL;
-
- cqe = rq->cqe[*cons_idx];
-
- status = be32_to_cpu(cqe->status);
-
- rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE);
- if (!rx_done)
- return NULL;
-
- *skb = rq->saved_skb[*cons_idx];
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position
- * @rq: recv queue
- * @wqe_size: the size of the wqe
- * @skb: return saved skb
- * @cons_idx: consumer index in the wq
- *
- * Return wqe in incremented ci position
- **/
-struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb,
- u16 *cons_idx)
-{
- struct hinic_wq *wq = rq->wq;
- struct hinic_hw_wqe *hw_wqe;
- unsigned int num_wqebbs;
-
- wqe_size = ALIGN(wqe_size, wq->wqebb_size);
- num_wqebbs = wqe_size / wq->wqebb_size;
-
- *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs);
-
- *skb = rq->saved_skb[*cons_idx];
-
- hw_wqe = hinic_read_wqe_direct(wq, *cons_idx);
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_put_wqe - release the ci for new wqes
- * @rq: recv queue
- * @cons_idx: consumer index of the wqe
- * @wqe_size: the size of the wqe
- **/
-void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
- unsigned int wqe_size)
-{
- struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
- u32 status = be32_to_cpu(cqe->status);
-
- status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE);
-
- /* Rx WQE size is 1 WQEBB, no wq shadow*/
- cqe->status = cpu_to_be32(status);
-
- wmb(); /* clear done flag */
-
- hinic_put_wqe(rq->wq, wqe_size);
-}
-
-/**
- * hinic_rq_get_sge - get sge from the wqe
- * @rq: recv queue
- * @rq_wqe: wqe to get the sge from its buf address
- * @cons_idx: consumer index
- * @sge: returned sge
- **/
-void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
- u16 cons_idx, struct hinic_sge *sge)
-{
- struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
- u32 len = be32_to_cpu(cqe->len);
-
- sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr);
- sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr);
- sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN);
-}
-
-/**
- * hinic_rq_prepare_wqe - prepare wqe before insert to the queue
- * @rq: recv queue
- * @prod_idx: pi value
- * @rq_wqe: the wqe
- * @sge: sge for use by the wqe for recv buf address
- **/
-void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge)
-{
- struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
- struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
- struct hinic_rq_cqe *cqe = rq->cqe[prod_idx];
- struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
- dma_addr_t cqe_dma = rq->cqe_dma[prod_idx];
-
- ctrl->ctrl_info =
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)),
- COMPLETE_LEN) |
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)),
- BUFDESC_SECT_LEN) |
- HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
-
- hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe));
-
- buf_desc->hi_addr = sge->hi_addr;
- buf_desc->lo_addr = sge->lo_addr;
-}
-
-/**
- * hinic_rq_update - update pi of the rq
- * @rq: recv queue
- * @prod_idx: pi value
- **/
-void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)
-{
- *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1));
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
deleted file mode 100644
index 178dcc874370..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_QP_H
-#define HINIC_HW_QP_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/sizes.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp_ctxt.h"
-
-#define HINIC_SQ_DB_INFO_PI_HI_SHIFT 0
-#define HINIC_SQ_DB_INFO_QID_SHIFT 8
-#define HINIC_SQ_DB_INFO_PATH_SHIFT 23
-#define HINIC_SQ_DB_INFO_COS_SHIFT 24
-#define HINIC_SQ_DB_INFO_TYPE_SHIFT 27
-
-#define HINIC_SQ_DB_INFO_PI_HI_MASK 0xFF
-#define HINIC_SQ_DB_INFO_QID_MASK 0x3FF
-#define HINIC_SQ_DB_INFO_PATH_MASK 0x1
-#define HINIC_SQ_DB_INFO_COS_MASK 0x7
-#define HINIC_SQ_DB_INFO_TYPE_MASK 0x1F
-
-#define HINIC_SQ_DB_INFO_SET(val, member) \
- (((u32)(val) & HINIC_SQ_DB_INFO_##member##_MASK) \
- << HINIC_SQ_DB_INFO_##member##_SHIFT)
-
-#define HINIC_SQ_WQEBB_SIZE 64
-#define HINIC_RQ_WQEBB_SIZE 32
-
-#define HINIC_SQ_PAGE_SIZE SZ_256K
-#define HINIC_RQ_PAGE_SIZE SZ_256K
-
-#define HINIC_SQ_DEPTH SZ_4K
-#define HINIC_RQ_DEPTH SZ_4K
-
-#define HINIC_MAX_QUEUE_DEPTH SZ_4K
-#define HINIC_MIN_QUEUE_DEPTH 128
-
-/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */
-#define HINIC_RX_BUF_SZ 2048
-#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX
-
-#define HINIC_MIN_TX_WQE_SIZE(wq) \
- ALIGN(HINIC_SQ_WQE_SIZE(1), (wq)->wqebb_size)
-
-#define HINIC_MIN_TX_NUM_WQEBBS(sq) \
- (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
-
-enum hinic_rx_buf_sz_idx {
- HINIC_RX_BUF_SZ_32_IDX,
- HINIC_RX_BUF_SZ_64_IDX,
- HINIC_RX_BUF_SZ_96_IDX,
- HINIC_RX_BUF_SZ_128_IDX,
- HINIC_RX_BUF_SZ_192_IDX,
- HINIC_RX_BUF_SZ_256_IDX,
- HINIC_RX_BUF_SZ_384_IDX,
- HINIC_RX_BUF_SZ_512_IDX,
- HINIC_RX_BUF_SZ_768_IDX,
- HINIC_RX_BUF_SZ_1024_IDX,
- HINIC_RX_BUF_SZ_1536_IDX,
- HINIC_RX_BUF_SZ_2048_IDX,
- HINIC_RX_BUF_SZ_3072_IDX,
- HINIC_RX_BUF_SZ_4096_IDX,
- HINIC_RX_BUF_SZ_8192_IDX,
- HINIC_RX_BUF_SZ_16384_IDX,
-};
-
-struct hinic_sq {
- struct hinic_hwif *hwif;
-
- struct hinic_wq *wq;
-
- u16 qid;
-
- u32 irq;
- u16 msix_entry;
-
- void *hw_ci_addr;
- dma_addr_t hw_ci_dma_addr;
-
- void __iomem *db_base;
-
- struct sk_buff **saved_skb;
- struct hinic_debug_priv *dbg;
-};
-
-struct hinic_rq {
- struct hinic_hwif *hwif;
-
- struct hinic_wq *wq;
-
- u16 qid;
-
- struct cpumask affinity_mask;
- u32 irq;
- u16 msix_entry;
-
- size_t buf_sz;
-
- struct sk_buff **saved_skb;
-
- struct hinic_rq_cqe **cqe;
- dma_addr_t *cqe_dma;
-
- u16 *pi_virt_addr;
- dma_addr_t pi_dma_addr;
- struct hinic_debug_priv *dbg;
-};
-
-struct hinic_qp {
- struct hinic_sq sq;
- struct hinic_rq rq;
-
- u16 q_id;
-};
-
-void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
- enum hinic_qp_ctxt_type ctxt_type,
- u16 num_queues, u16 max_queues);
-
-void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
- struct hinic_sq *sq, u16 global_qid);
-
-void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
- struct hinic_rq *rq, u16 global_qid);
-
-int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr,
- dma_addr_t ci_dma_addr, void __iomem *db_base);
-
-void hinic_clean_sq(struct hinic_sq *sq);
-
-int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry);
-
-void hinic_clean_rq(struct hinic_rq *rq);
-
-int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
-
-int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
-
-void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len);
-
-void hinic_task_set_outter_l3(struct hinic_sq_task *task,
- enum hinic_l3_offload_type l3_type,
- u32 network_len);
-
-void hinic_task_set_inner_l3(struct hinic_sq_task *task,
- enum hinic_l3_offload_type l3_type,
- u32 network_len);
-
-void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
- enum hinic_l4_tunnel_type l4_type,
- u32 tunnel_len);
-
-void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
- u32 *queue_info,
- enum hinic_l4_offload_type l4_offload,
- u32 l4_len, u32 offset);
-
-void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
- u32 *queue_info,
- enum hinic_l4_offload_type l4_offload,
- u32 l4_len,
- u32 offset, u32 ip_ident, u32 mss);
-
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *wqe,
- struct hinic_sge *sges, int nr_sges);
-
-void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
- unsigned int cos);
-
-struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
- unsigned int wqe_size, u16 *prod_idx);
-
-void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size);
-
-void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct sk_buff *skb,
- unsigned int wqe_size);
-
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int wqe_size, u16 *cons_idx);
-
-struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx);
-
-void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
-
-void hinic_sq_get_sges(struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
-
-struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
- unsigned int wqe_size, u16 *prod_idx);
-
-void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *wqe, struct sk_buff *skb);
-
-struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb, u16 *cons_idx);
-
-struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb,
- u16 *cons_idx);
-
-void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
- unsigned int wqe_size);
-
-void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe,
- u16 cons_idx, struct hinic_sge *sge);
-
-void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *wqe, struct hinic_sge *sge);
-
-void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
deleted file mode 100644
index 00900a6640ad..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_QP_CTXT_H
-#define HINIC_HW_QP_CTXT_H
-
-#include <linux/types.h>
-
-#include "hinic_hw_cmdq.h"
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13
-#define HINIC_SQ_CTXT_CEQ_ATTR_EN_SHIFT 23
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FF
-#define HINIC_SQ_CTXT_CEQ_ATTR_EN_MASK 0x1
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_CEQ_ATTR_##member##_MASK) \
- << HINIC_SQ_CTXT_CEQ_ATTR_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_CI_IDX_SHIFT 11
-#define HINIC_SQ_CTXT_CI_WRAPPED_SHIFT 23
-
-#define HINIC_SQ_CTXT_CI_IDX_MASK 0xFFF
-#define HINIC_SQ_CTXT_CI_WRAPPED_MASK 0x1
-
-#define HINIC_SQ_CTXT_CI_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_CI_##member##_MASK) \
- << HINIC_SQ_CTXT_CI_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
-#define HINIC_SQ_CTXT_WQ_PAGE_PI_SHIFT 20
-
-#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF
-#define HINIC_SQ_CTXT_WQ_PAGE_PI_MASK 0xFFF
-
-#define HINIC_SQ_CTXT_WQ_PAGE_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_WQ_PAGE_##member##_MASK) \
- << HINIC_SQ_CTXT_WQ_PAGE_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
-#define HINIC_SQ_CTXT_PREF_CACHE_MAX_SHIFT 14
-#define HINIC_SQ_CTXT_PREF_CACHE_MIN_SHIFT 25
-
-#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF
-#define HINIC_SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF
-#define HINIC_SQ_CTXT_PREF_CACHE_MIN_MASK 0x7F
-
-#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0
-#define HINIC_SQ_CTXT_PREF_CI_SHIFT 20
-
-#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF
-#define HINIC_SQ_CTXT_PREF_CI_MASK 0xFFF
-
-#define HINIC_SQ_CTXT_PREF_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_PREF_##member##_MASK) \
- << HINIC_SQ_CTXT_PREF_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_WQ_BLOCK_##member##_MASK) \
- << HINIC_SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_EN_SHIFT 0
-#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_SHIFT 1
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_EN_MASK 0x1
-#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_MASK 0x1
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_CEQ_ATTR_##member##_MASK) \
- << HINIC_RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_PI_IDX_SHIFT 0
-#define HINIC_RQ_CTXT_PI_INTR_SHIFT 22
-
-#define HINIC_RQ_CTXT_PI_IDX_MASK 0xFFF
-#define HINIC_RQ_CTXT_PI_INTR_MASK 0x3FF
-
-#define HINIC_RQ_CTXT_PI_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_PI_##member##_MASK) << \
- HINIC_RQ_CTXT_PI_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
-#define HINIC_RQ_CTXT_WQ_PAGE_CI_SHIFT 20
-
-#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF
-#define HINIC_RQ_CTXT_WQ_PAGE_CI_MASK 0xFFF
-
-#define HINIC_RQ_CTXT_WQ_PAGE_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_WQ_PAGE_##member##_MASK) << \
- HINIC_RQ_CTXT_WQ_PAGE_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
-#define HINIC_RQ_CTXT_PREF_CACHE_MAX_SHIFT 14
-#define HINIC_RQ_CTXT_PREF_CACHE_MIN_SHIFT 25
-
-#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF
-#define HINIC_RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF
-#define HINIC_RQ_CTXT_PREF_CACHE_MIN_MASK 0x7F
-
-#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0
-#define HINIC_RQ_CTXT_PREF_CI_SHIFT 20
-
-#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF
-#define HINIC_RQ_CTXT_PREF_CI_MASK 0xFFF
-
-#define HINIC_RQ_CTXT_PREF_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_PREF_##member##_MASK) << \
- HINIC_RQ_CTXT_PREF_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
- HINIC_RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_SIZE(num_sqs) (sizeof(struct hinic_qp_ctxt_header) \
- + (num_sqs) * sizeof(struct hinic_sq_ctxt))
-
-#define HINIC_RQ_CTXT_SIZE(num_rqs) (sizeof(struct hinic_qp_ctxt_header) \
- + (num_rqs) * sizeof(struct hinic_rq_ctxt))
-
-#define HINIC_WQ_PAGE_PFN_SHIFT 12
-#define HINIC_WQ_BLOCK_PFN_SHIFT 9
-
-#define HINIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> HINIC_WQ_PAGE_PFN_SHIFT)
-#define HINIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> \
- HINIC_WQ_BLOCK_PFN_SHIFT)
-
-#define HINIC_Q_CTXT_MAX \
- ((HINIC_CMDQ_BUF_SIZE - sizeof(struct hinic_qp_ctxt_header)) \
- / sizeof(struct hinic_sq_ctxt))
-
-enum hinic_qp_ctxt_type {
- HINIC_QP_CTXT_TYPE_SQ,
- HINIC_QP_CTXT_TYPE_RQ
-};
-
-struct hinic_qp_ctxt_header {
- u16 num_queues;
- u16 queue_type;
- u32 addr_offset;
-};
-
-struct hinic_sq_ctxt {
- u32 ceq_attr;
-
- u32 ci_wrapped;
-
- u32 wq_hi_pfn_pi;
- u32 wq_lo_pfn;
-
- u32 pref_cache;
- u32 pref_wrapped;
- u32 pref_wq_hi_pfn_ci;
- u32 pref_wq_lo_pfn;
-
- u32 rsvd0;
- u32 rsvd1;
-
- u32 wq_block_hi_pfn;
- u32 wq_block_lo_pfn;
-};
-
-struct hinic_rq_ctxt {
- u32 ceq_attr;
-
- u32 pi_intr_attr;
-
- u32 wq_hi_pfn_ci;
- u32 wq_lo_pfn;
-
- u32 pref_cache;
- u32 pref_wrapped;
-
- u32 pref_wq_hi_pfn_ci;
- u32 pref_wq_lo_pfn;
-
- u32 pi_paddr_hi;
- u32 pi_paddr_lo;
-
- u32 wq_block_hi_pfn;
- u32 wq_block_lo_pfn;
-};
-
-struct hinic_clean_queue_ctxt {
- struct hinic_qp_ctxt_header cmdq_hdr;
- u32 ctxt_size;
-};
-
-struct hinic_sq_ctxt_block {
- struct hinic_qp_ctxt_header hdr;
- struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
-};
-
-struct hinic_rq_ctxt_block {
- struct hinic_qp_ctxt_header hdr;
- struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX];
-};
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
deleted file mode 100644
index e1a1735c00c1..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ /dev/null
@@ -1,904 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include <linux/semaphore.h>
-#include <linux/errno.h>
-#include <linux/vmalloc.h>
-#include <linux/err.h>
-#include <asm/byteorder.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-
-#define WQS_BLOCKS_PER_PAGE 4
-
-#define WQ_BLOCK_SIZE 4096
-#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
-
-#define WQS_MAX_NUM_BLOCKS 128
-#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \
- sizeof((wqs)->free_blocks[0]))
-
-#define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
-
-#define WQ_PAGE_ADDR_SIZE sizeof(u64)
-#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
-
-#define CMDQ_BLOCK_SIZE 512
-#define CMDQ_PAGE_SIZE 4096
-
-#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
-
-#define WQ_BASE_VADDR(wqs, wq) \
- ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define WQ_BASE_PADDR(wqs, wq) \
- ((wqs)->page_paddr[(wq)->page_idx] \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define WQ_BASE_ADDR(wqs, wq) \
- ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
- ((void *)((cmdq_pages)->page_vaddr) \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
- ((cmdq_pages)->page_paddr \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
- ((void *)((cmdq_pages)->shadow_page_vaddr) \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define WQ_PAGE_ADDR(wq, idx) \
- ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
-
-#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
-
-#define WQE_IN_RANGE(wqe, start, end) \
- (((unsigned long)(wqe) >= (unsigned long)(start)) && \
- ((unsigned long)(wqe) < (unsigned long)(end)))
-
-#define WQE_SHADOW_PAGE(wq, wqe) \
- (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
- / (wq)->max_wqe_size)
-
-static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
-{
- return (((idx) & ((wq)->num_wqebbs_per_page - 1))
- << (wq)->wqebb_size_shift);
-}
-
-static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
-{
- return (((idx) >> ((wq)->wqebbs_per_page_shift))
- & ((wq)->num_q_pages - 1));
-}
-
-/**
- * queue_alloc_page - allocate page for Queue
- * @hwif: HW interface for allocating DMA
- * @vaddr: virtual address will be returned in this address
- * @paddr: physical address will be returned in this address
- * @shadow_vaddr: VM area will be return here for holding WQ page addresses
- * @page_sz: page size of each WQ page
- *
- * Return 0 - Success, negative - Failure
- **/
-static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
- void ***shadow_vaddr, size_t page_sz)
-{
- struct pci_dev *pdev = hwif->pdev;
- dma_addr_t dma_addr;
-
- *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
- GFP_KERNEL);
- if (!*vaddr) {
- dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
- return -ENOMEM;
- }
-
- *paddr = (u64)dma_addr;
-
- /* use vzalloc for big mem */
- *shadow_vaddr = vzalloc(page_sz);
- if (!*shadow_vaddr)
- goto err_shadow_vaddr;
-
- return 0;
-
-err_shadow_vaddr:
- dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr);
- return -ENOMEM;
-}
-
-/**
- * wqs_allocate_page - allocate page for WQ set
- * @wqs: Work Queue Set
- * @page_idx: the page index of the page will be allocated
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx)
-{
- return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx],
- &wqs->page_paddr[page_idx],
- &wqs->shadow_page_vaddr[page_idx],
- WQS_PAGE_SIZE);
-}
-
-/**
- * wqs_free_page - free page of WQ set
- * @wqs: Work Queue Set
- * @page_idx: the page index of the page will be freed
- **/
-static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE,
- wqs->page_vaddr[page_idx],
- (dma_addr_t)wqs->page_paddr[page_idx]);
- vfree(wqs->shadow_page_vaddr[page_idx]);
-}
-
-/**
- * cmdq_allocate_page - allocate page for cmdq
- * @cmdq_pages: the pages of the cmdq queue struct to hold the page
- *
- * Return 0 - Success, negative - Failure
- **/
-static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
-{
- return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
- &cmdq_pages->page_paddr,
- &cmdq_pages->shadow_page_vaddr,
- CMDQ_PAGE_SIZE);
-}
-
-/**
- * cmdq_free_page - free page from cmdq
- * @cmdq_pages: the pages of the cmdq queue struct that hold the page
- **/
-static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
-{
- struct hinic_hwif *hwif = cmdq_pages->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
- cmdq_pages->page_vaddr,
- (dma_addr_t)cmdq_pages->page_paddr);
- vfree(cmdq_pages->shadow_page_vaddr);
-}
-
-static int alloc_page_arrays(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
- sizeof(*wqs->page_paddr), GFP_KERNEL);
- if (!wqs->page_paddr)
- return -ENOMEM;
-
- wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
- sizeof(*wqs->page_vaddr), GFP_KERNEL);
- if (!wqs->page_vaddr)
- goto err_page_vaddr;
-
- wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
- sizeof(*wqs->shadow_page_vaddr),
- GFP_KERNEL);
- if (!wqs->shadow_page_vaddr)
- goto err_page_shadow_vaddr;
-
- return 0;
-
-err_page_shadow_vaddr:
- devm_kfree(&pdev->dev, wqs->page_vaddr);
-
-err_page_vaddr:
- devm_kfree(&pdev->dev, wqs->page_paddr);
- return -ENOMEM;
-}
-
-static void free_page_arrays(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- devm_kfree(&pdev->dev, wqs->shadow_page_vaddr);
- devm_kfree(&pdev->dev, wqs->page_vaddr);
- devm_kfree(&pdev->dev, wqs->page_paddr);
-}
-
-static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx,
- int *block_idx)
-{
- int pos;
-
- down(&wqs->alloc_blocks_lock);
-
- wqs->num_free_blks--;
-
- if (wqs->num_free_blks < 0) {
- wqs->num_free_blks++;
- up(&wqs->alloc_blocks_lock);
- return -ENOMEM;
- }
-
- pos = wqs->alloc_blk_pos++;
- pos &= WQS_MAX_NUM_BLOCKS - 1;
-
- *page_idx = wqs->free_blocks[pos].page_idx;
- *block_idx = wqs->free_blocks[pos].block_idx;
-
- wqs->free_blocks[pos].page_idx = -1;
- wqs->free_blocks[pos].block_idx = -1;
-
- up(&wqs->alloc_blocks_lock);
- return 0;
-}
-
-static void wqs_return_block(struct hinic_wqs *wqs, int page_idx,
- int block_idx)
-{
- int pos;
-
- down(&wqs->alloc_blocks_lock);
-
- pos = wqs->return_blk_pos++;
- pos &= WQS_MAX_NUM_BLOCKS - 1;
-
- wqs->free_blocks[pos].page_idx = page_idx;
- wqs->free_blocks[pos].block_idx = block_idx;
-
- wqs->num_free_blks++;
-
- up(&wqs->alloc_blocks_lock);
-}
-
-static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
-{
- int page_idx, blk_idx, pos = 0;
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
- for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
- wqs->free_blocks[pos].page_idx = page_idx;
- wqs->free_blocks[pos].block_idx = blk_idx;
- pos++;
- }
- }
-
- wqs->alloc_blk_pos = 0;
- wqs->return_blk_pos = pos;
- wqs->num_free_blks = pos;
-
- sema_init(&wqs->alloc_blocks_lock, 1);
-}
-
-/**
- * hinic_wqs_alloc - allocate Work Queues set
- * @wqs: Work Queue Set
- * @max_wqs: maximum wqs to allocate
- * @hwif: HW interface for use for the allocation
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs,
- struct hinic_hwif *hwif)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err, i, page_idx;
-
- max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE);
- if (max_wqs > WQS_MAX_NUM_BLOCKS) {
- dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs);
- return -EINVAL;
- }
-
- wqs->hwif = hwif;
- wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE;
-
- if (alloc_page_arrays(wqs)) {
- dev_err(&pdev->dev,
- "Failed to allocate mem for page addresses\n");
- return -ENOMEM;
- }
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
- err = wqs_allocate_page(wqs, page_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed wq page allocation\n");
- goto err_wq_allocate_page;
- }
- }
-
- wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs),
- GFP_KERNEL);
- if (!wqs->free_blocks) {
- err = -ENOMEM;
- goto err_alloc_blocks;
- }
-
- init_wqs_blocks_arr(wqs);
- return 0;
-
-err_alloc_blocks:
-err_wq_allocate_page:
- for (i = 0; i < page_idx; i++)
- wqs_free_page(wqs, i);
-
- free_page_arrays(wqs);
- return err;
-}
-
-/**
- * hinic_wqs_free - free Work Queues set
- * @wqs: Work Queue Set
- **/
-void hinic_wqs_free(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int page_idx;
-
- devm_kfree(&pdev->dev, wqs->free_blocks);
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
- wqs_free_page(wqs, page_idx);
-
- free_page_arrays(wqs);
-}
-
-/**
- * alloc_wqes_shadow - allocate WQE shadows for WQ
- * @wq: WQ to allocate shadows for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_wqes_shadow(struct hinic_wq *wq)
-{
- struct hinic_hwif *hwif = wq->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
- wq->max_wqe_size, GFP_KERNEL);
- if (!wq->shadow_wqe)
- return -ENOMEM;
-
- wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
- sizeof(*wq->shadow_idx), GFP_KERNEL);
- if (!wq->shadow_idx)
- goto err_shadow_idx;
-
- return 0;
-
-err_shadow_idx:
- devm_kfree(&pdev->dev, wq->shadow_wqe);
- return -ENOMEM;
-}
-
-/**
- * free_wqes_shadow - free WQE shadows of WQ
- * @wq: WQ to free shadows from
- **/
-static void free_wqes_shadow(struct hinic_wq *wq)
-{
- struct hinic_hwif *hwif = wq->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- devm_kfree(&pdev->dev, wq->shadow_idx);
- devm_kfree(&pdev->dev, wq->shadow_wqe);
-}
-
-/**
- * free_wq_pages - free pages of WQ
- * @hwif: HW interface for releasing dma addresses
- * @wq: WQ to free pages from
- * @num_q_pages: number pages to free
- **/
-static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
- int num_q_pages)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i;
-
- for (i = 0; i < num_q_pages; i++) {
- void **vaddr = &wq->shadow_block_vaddr[i];
- u64 *paddr = &wq->block_vaddr[i];
- dma_addr_t dma_addr;
-
- dma_addr = (dma_addr_t)be64_to_cpu(*paddr);
- dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
- dma_addr);
- }
-
- free_wqes_shadow(wq);
-}
-
-/**
- * alloc_wq_pages - alloc pages for WQ
- * @hwif: HW interface for allocating dma addresses
- * @wq: WQ to allocate pages for
- * @max_pages: maximum pages allowed
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
- int max_pages)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i, err, num_q_pages;
-
- num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
- if (num_q_pages > max_pages) {
- dev_err(&pdev->dev, "Number wq pages exceeds the limit\n");
- return -EINVAL;
- }
-
- if (num_q_pages & (num_q_pages - 1)) {
- dev_err(&pdev->dev, "Number wq pages must be power of 2\n");
- return -EINVAL;
- }
-
- wq->num_q_pages = num_q_pages;
-
- err = alloc_wqes_shadow(wq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate wqe shadow\n");
- return err;
- }
-
- for (i = 0; i < num_q_pages; i++) {
- void **vaddr = &wq->shadow_block_vaddr[i];
- u64 *paddr = &wq->block_vaddr[i];
- dma_addr_t dma_addr;
-
- *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
- &dma_addr, GFP_KERNEL);
- if (!*vaddr) {
- dev_err(&pdev->dev, "Failed to allocate wq page\n");
- goto err_alloc_wq_pages;
- }
-
- /* HW uses Big Endian Format */
- *paddr = cpu_to_be64(dma_addr);
- }
-
- return 0;
-
-err_alloc_wq_pages:
- free_wq_pages(wq, hwif, i);
- return -ENOMEM;
-}
-
-/**
- * hinic_wq_allocate - Allocate the WQ resources from the WQS
- * @wqs: WQ set from which to allocate the WQ resources
- * @wq: WQ to allocate resources for it from the WQ set
- * @wqebb_size: Work Queue Block Byte Size
- * @wq_page_size: the page size in the Work Queue
- * @q_depth: number of wqebbs in WQ
- * @max_wqe_size: maximum WQE size that will be used in the WQ
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u32 wq_page_size, u16 q_depth,
- u16 max_wqe_size)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 num_wqebbs_per_page;
- u16 wqebb_size_shift;
- int err;
-
- if (!is_power_of_2(wqebb_size)) {
- dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
- return -EINVAL;
- }
-
- if (wq_page_size == 0) {
- dev_err(&pdev->dev, "wq_page_size must be > 0\n");
- return -EINVAL;
- }
-
- if (q_depth & (q_depth - 1)) {
- dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
- return -EINVAL;
- }
-
- wqebb_size_shift = ilog2(wqebb_size);
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
- >> wqebb_size_shift;
-
- if (!is_power_of_2(num_wqebbs_per_page)) {
- dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
- return -EINVAL;
- }
-
- wq->hwif = hwif;
-
- err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed to get free wqs next block\n");
- return err;
- }
-
- wq->wqebb_size = wqebb_size;
- wq->wq_page_size = wq_page_size;
- wq->q_depth = q_depth;
- wq->max_wqe_size = max_wqe_size;
- wq->num_wqebbs_per_page = num_wqebbs_per_page;
- wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
- wq->wqebb_size_shift = wqebb_size_shift;
- wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
- wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
- wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
-
- err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate wq pages\n");
- goto err_alloc_wq_pages;
- }
-
- atomic_set(&wq->cons_idx, 0);
- atomic_set(&wq->prod_idx, 0);
- atomic_set(&wq->delta, q_depth);
- wq->mask = q_depth - 1;
-
- return 0;
-
-err_alloc_wq_pages:
- wqs_return_block(wqs, wq->page_idx, wq->block_idx);
- return err;
-}
-
-/**
- * hinic_wq_free - Free the WQ resources to the WQS
- * @wqs: WQ set to free the WQ resources to it
- * @wq: WQ to free its resources to the WQ set resources
- **/
-void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
-{
- free_wq_pages(wq, wqs->hwif, wq->num_q_pages);
-
- wqs_return_block(wqs, wq->page_idx, wq->block_idx);
-}
-
-/**
- * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
- * @cmdq_pages: will hold the pages of the cmdq
- * @wq: returned wqs
- * @hwif: HW interface
- * @cmdq_blocks: number of cmdq blocks/wq to allocate
- * @wqebb_size: Work Queue Block Byte Size
- * @wq_page_size: the page size in the Work Queue
- * @q_depth: number of wqebbs in WQ
- * @max_wqe_size: maximum WQE size that will be used in the WQ
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
- u16 q_depth, u16 max_wqe_size)
-{
- struct pci_dev *pdev = hwif->pdev;
- u16 num_wqebbs_per_page_shift;
- u16 num_wqebbs_per_page;
- u16 wqebb_size_shift;
- int i, j, err = -ENOMEM;
-
- if (!is_power_of_2(wqebb_size)) {
- dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
- return -EINVAL;
- }
-
- if (wq_page_size == 0) {
- dev_err(&pdev->dev, "wq_page_size must be > 0\n");
- return -EINVAL;
- }
-
- if (q_depth & (q_depth - 1)) {
- dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
- return -EINVAL;
- }
-
- wqebb_size_shift = ilog2(wqebb_size);
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
- >> wqebb_size_shift;
-
- if (!is_power_of_2(num_wqebbs_per_page)) {
- dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
- return -EINVAL;
- }
-
- cmdq_pages->hwif = hwif;
-
- err = cmdq_allocate_page(cmdq_pages);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
- return err;
- }
- num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
-
- for (i = 0; i < cmdq_blocks; i++) {
- wq[i].hwif = hwif;
- wq[i].page_idx = 0;
- wq[i].block_idx = i;
-
- wq[i].wqebb_size = wqebb_size;
- wq[i].wq_page_size = wq_page_size;
- wq[i].q_depth = q_depth;
- wq[i].max_wqe_size = max_wqe_size;
- wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
- wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
- wq[i].wqebb_size_shift = wqebb_size_shift;
- wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
- wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
- wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
-
- err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
- CMDQ_WQ_MAX_PAGES);
- if (err) {
- dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
- goto err_cmdq_block;
- }
-
- atomic_set(&wq[i].cons_idx, 0);
- atomic_set(&wq[i].prod_idx, 0);
- atomic_set(&wq[i].delta, q_depth);
- wq[i].mask = q_depth - 1;
- }
-
- return 0;
-
-err_cmdq_block:
- for (j = 0; j < i; j++)
- free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);
-
- cmdq_free_page(cmdq_pages);
- return err;
-}
-
-/**
- * hinic_wqs_cmdq_free - Free wqs from cmdqs
- * @cmdq_pages: hold the pages of the cmdq
- * @wq: wqs to free
- * @cmdq_blocks: number of wqs to free
- **/
-void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, int cmdq_blocks)
-{
- int i;
-
- for (i = 0; i < cmdq_blocks; i++)
- free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);
-
- cmdq_free_page(cmdq_pages);
-}
-
-static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
- int num_wqebbs, u16 idx)
-{
- void *wqebb_addr;
- int i;
-
- for (i = 0; i < num_wqebbs; i++, idx++) {
- idx = MASKED_WQE_IDX(wq, idx);
- wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
- WQE_PAGE_OFF(wq, idx);
-
- memcpy(shadow_addr, wqebb_addr, wq->wqebb_size);
-
- shadow_addr += wq->wqebb_size;
- }
-}
-
-static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
- int num_wqebbs, u16 idx)
-{
- void *wqebb_addr;
- int i;
-
- for (i = 0; i < num_wqebbs; i++, idx++) {
- idx = MASKED_WQE_IDX(wq, idx);
- wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
- WQE_PAGE_OFF(wq, idx);
-
- memcpy(wqebb_addr, shadow_addr, wq->wqebb_size);
- shadow_addr += wq->wqebb_size;
- }
-}
-
-/**
- * hinic_get_wqe - get wqe ptr in the current pi and update the pi
- * @wq: wq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *prod_idx)
-{
- int curr_pg, end_pg, num_wqebbs;
- u16 curr_prod_idx, end_prod_idx;
-
- *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
-
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;
-
- if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
- atomic_add(num_wqebbs, &wq->delta);
- return ERR_PTR(-EBUSY);
- }
-
- end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);
-
- end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx);
- curr_prod_idx = end_prod_idx - num_wqebbs;
- curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
-
- /* end prod index points to the next wqebb, therefore minus 1 */
- end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1);
-
- curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
- end_pg = WQE_PAGE_NUM(wq, end_prod_idx);
-
- *prod_idx = curr_prod_idx;
-
- /* If we only have one page, still need to get shadown wqe when
- * wqe rolling-over page
- */
- if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
- void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
-
- wq->shadow_idx[curr_pg] = *prod_idx;
- return shadow_addr;
- }
-
- return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
-}
-
-/**
- * hinic_return_wqe - return the wqe when transmit failed
- * @wq: wq to return wqe
- * @wqe_size: wqe size
- **/
-void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
-{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
-
- atomic_sub(num_wqebbs, &wq->prod_idx);
-
- atomic_add(num_wqebbs, &wq->delta);
-}
-
-/**
- * hinic_put_wqe - return the wqe place to use for a new wqe
- * @wq: wq to return wqe
- * @wqe_size: wqe size
- **/
-void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
-{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
- >> wq->wqebb_size_shift;
-
- atomic_add(num_wqebbs, &wq->cons_idx);
-
- atomic_add(num_wqebbs, &wq->delta);
-}
-
-/**
- * hinic_read_wqe - read wqe ptr in the current ci
- * @wq: wq to get read from
- * @wqe_size: wqe size
- * @cons_idx: returned ci
- *
- * Return wqe pointer
- **/
-struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *cons_idx)
-{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
- >> wq->wqebb_size_shift;
- u16 curr_cons_idx, end_cons_idx;
- int curr_pg, end_pg;
-
- if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
- return ERR_PTR(-EBUSY);
-
- curr_cons_idx = atomic_read(&wq->cons_idx);
-
- curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
- end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);
-
- curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
- end_pg = WQE_PAGE_NUM(wq, end_cons_idx);
-
- *cons_idx = curr_cons_idx;
-
- /* If we only have one page, still need to get shadown wqe when
- * wqe rolling-over page
- */
- if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
- void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
- return shadow_addr;
- }
-
- return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
-}
-
-/**
- * hinic_read_wqe_direct - read wqe directly from ci position
- * @wq: wq
- * @cons_idx: ci position
- *
- * Return wqe
- **/
-struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx)
-{
- return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx);
-}
-
-/**
- * wqe_shadow - check if a wqe is shadow
- * @wq: wq of the wqe
- * @wqe: the wqe for shadow checking
- *
- * Return true - shadow, false - Not shadow
- **/
-static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe)
-{
- size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;
-
- return WQE_IN_RANGE(wqe, wq->shadow_wqe,
- &wq->shadow_wqe[wqe_shadow_size]);
-}
-
-/**
- * hinic_write_wqe - write the wqe to the wq
- * @wq: wq to write wqe to
- * @wqe: wqe to write
- * @wqe_size: wqe size
- **/
-void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
- unsigned int wqe_size)
-{
- int curr_pg, num_wqebbs;
- void *shadow_addr;
- u16 prod_idx;
-
- if (wqe_shadow(wq, wqe)) {
- curr_pg = WQE_SHADOW_PAGE(wq, wqe);
-
- prod_idx = wq->shadow_idx[curr_pg];
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
- shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx);
- }
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
deleted file mode 100644
index b06f8c0255de..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_WQ_H
-#define HINIC_HW_WQ_H
-
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include <linux/atomic.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-
-struct hinic_free_block {
- int page_idx;
- int block_idx;
-};
-
-struct hinic_wq {
- struct hinic_hwif *hwif;
-
- int page_idx;
- int block_idx;
-
- u16 wqebb_size;
- u32 wq_page_size;
- u16 q_depth;
- u16 max_wqe_size;
- u16 num_wqebbs_per_page;
- u16 wqebbs_per_page_shift;
- u16 wqebb_size_shift;
- /* The addresses are 64 bit in the HW */
- u64 block_paddr;
- void **shadow_block_vaddr;
- u64 *block_vaddr;
-
- int num_q_pages;
- u8 *shadow_wqe;
- u16 *shadow_idx;
-
- atomic_t cons_idx;
- atomic_t prod_idx;
- atomic_t delta;
- u16 mask;
-};
-
-struct hinic_wqs {
- struct hinic_hwif *hwif;
- int num_pages;
-
- /* The addresses are 64 bit in the HW */
- u64 *page_paddr;
- u64 **page_vaddr;
- void ***shadow_page_vaddr;
-
- struct hinic_free_block *free_blocks;
- int alloc_blk_pos;
- int return_blk_pos;
- int num_free_blks;
-
- /* Lock for getting a free block from the WQ set */
- struct semaphore alloc_blocks_lock;
-};
-
-struct hinic_cmdq_pages {
- /* The addresses are 64 bit in the HW */
- u64 page_paddr;
- u64 *page_vaddr;
- void **shadow_page_vaddr;
-
- struct hinic_hwif *hwif;
-};
-
-int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
- u16 q_depth, u16 max_wqe_size);
-
-void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, int cmdq_blocks);
-
-int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
- struct hinic_hwif *hwif);
-
-void hinic_wqs_free(struct hinic_wqs *wqs);
-
-int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u32 wq_page_size, u16 q_depth,
- u16 max_wqe_size);
-
-void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
-
-struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *prod_idx);
-
-void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size);
-
-void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
-
-struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *cons_idx);
-
-struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx);
-
-void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
- unsigned int wqe_size);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
deleted file mode 100644
index c6bdeed5606e..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ /dev/null
@@ -1,430 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_HW_WQE_H
-#define HINIC_HW_WQE_H
-
-#include "hinic_common.h"
-
-#define HINIC_CMDQ_CTRL_PI_SHIFT 0
-#define HINIC_CMDQ_CTRL_CMD_SHIFT 16
-#define HINIC_CMDQ_CTRL_MOD_SHIFT 24
-#define HINIC_CMDQ_CTRL_ACK_TYPE_SHIFT 29
-#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
-
-#define HINIC_CMDQ_CTRL_PI_MASK 0xFFFF
-#define HINIC_CMDQ_CTRL_CMD_MASK 0xFF
-#define HINIC_CMDQ_CTRL_MOD_MASK 0x1F
-#define HINIC_CMDQ_CTRL_ACK_TYPE_MASK 0x3
-#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1
-
-#define HINIC_CMDQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_CTRL_##member##_MASK) \
- << HINIC_CMDQ_CTRL_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTRL_GET(val, member) \
- (((val) >> HINIC_CMDQ_CTRL_##member##_SHIFT) \
- & HINIC_CMDQ_CTRL_##member##_MASK)
-
-#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
-#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
-#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
-#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_SHIFT 31
-
-#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFF
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3
-#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3
-#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_MASK 0x1
-
-#define HINIC_CMDQ_WQE_HEADER_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_WQE_HEADER_##member##_MASK) \
- << HINIC_CMDQ_WQE_HEADER_##member##_SHIFT)
-
-#define HINIC_CMDQ_WQE_HEADER_GET(val, member) \
- (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \
- & HINIC_CMDQ_WQE_HEADER_##member##_MASK)
-
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
-#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16
-#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22
-#define HINIC_SQ_CTRL_LEN_SHIFT 29
-
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
-#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F
-#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1
-#define HINIC_SQ_CTRL_LEN_MASK 0x3
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2
-#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10
-#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11
-#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
-#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27
-#define HINIC_SQ_CTRL_QUEUE_INFO_UC_SHIFT 28
-#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFF
-#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1
-#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1
-#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF
-#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1
-#define HINIC_SQ_CTRL_QUEUE_INFO_UC_MASK 0x1
-#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7
-
-#define HINIC_SQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \
- << HINIC_SQ_CTRL_##member##_SHIFT)
-
-#define HINIC_SQ_CTRL_GET(val, member) \
- (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \
- & HINIC_SQ_CTRL_##member##_MASK)
-
-#define HINIC_SQ_CTRL_CLEAR(val, member) \
- ((u32)(val) & (~(HINIC_SQ_CTRL_##member##_MASK \
- << HINIC_SQ_CTRL_##member##_SHIFT)))
-
-#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8
-#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10
-#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12
-#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO0_TSO_FLAG_SHIFT 15
-#define HINIC_SQ_TASK_INFO0_VLAN_TAG_SHIFT 16
-
-#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_MASK 0x3
-#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3
-#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1
-#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO0_TSO_FLAG_MASK 0x1
-#define HINIC_SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFF
-
-#define HINIC_SQ_TASK_INFO0_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO0_##member##_MASK) << \
- HINIC_SQ_TASK_INFO0_##member##_SHIFT)
-
-/* 8 bits reserved */
-#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8
-#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16
-#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24
-
-/* 8 bits reserved */
-#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFF
-
-#define HINIC_SQ_TASK_INFO1_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \
- HINIC_SQ_TASK_INFO1_##member##_SHIFT)
-
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24
-/* 8 bits reserved */
-
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3
-/* 8 bits reserved */
-
-#define HINIC_SQ_TASK_INFO2_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO2_##member##_MASK) << \
- HINIC_SQ_TASK_INFO2_##member##_SHIFT)
-
-/* 31 bits reserved */
-#define HINIC_SQ_TASK_INFO4_L2TYPE_SHIFT 31
-
-/* 31 bits reserved */
-#define HINIC_SQ_TASK_INFO4_L2TYPE_MASK 0x1
-
-#define HINIC_SQ_TASK_INFO4_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO4_##member##_MASK) << \
- HINIC_SQ_TASK_INFO4_##member##_SHIFT)
-
-#define HINIC_RQ_CQE_STATUS_RXDONE_SHIFT 31
-
-#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1
-
-#define HINIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT 0
-
-#define HINIC_RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU
-
-#define HINIC_RQ_CQE_STATUS_GET(val, member) \
- (((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \
- HINIC_RQ_CQE_STATUS_##member##_MASK)
-
-#define HINIC_RQ_CQE_STATUS_CLEAR(val, member) \
- ((val) & (~(HINIC_RQ_CQE_STATUS_##member##_MASK << \
- HINIC_RQ_CQE_STATUS_##member##_SHIFT)))
-
-#define HINIC_RQ_CQE_SGE_LEN_SHIFT 16
-
-#define HINIC_RQ_CQE_SGE_LEN_MASK 0xFFFF
-
-#define HINIC_RQ_CQE_SGE_GET(val, member) \
- (((val) >> HINIC_RQ_CQE_SGE_##member##_SHIFT) & \
- HINIC_RQ_CQE_SGE_##member##_MASK)
-
-#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
-#define HINIC_RQ_CTRL_COMPLETE_FORMAT_SHIFT 15
-#define HINIC_RQ_CTRL_COMPLETE_LEN_SHIFT 27
-#define HINIC_RQ_CTRL_LEN_SHIFT 29
-
-#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
-#define HINIC_RQ_CTRL_COMPLETE_FORMAT_MASK 0x1
-#define HINIC_RQ_CTRL_COMPLETE_LEN_MASK 0x3
-#define HINIC_RQ_CTRL_LEN_MASK 0x3
-
-#define HINIC_RQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTRL_##member##_MASK) << \
- HINIC_RQ_CTRL_##member##_SHIFT)
-
-#define HINIC_SQ_WQE_SIZE(nr_sges) \
- (sizeof(struct hinic_sq_ctrl) + \
- sizeof(struct hinic_sq_task) + \
- (nr_sges) * sizeof(struct hinic_sq_bufdesc))
-
-#define HINIC_SCMD_DATA_LEN 16
-
-#define HINIC_MAX_SQ_BUFDESCS 17
-
-#define HINIC_SQ_WQE_MAX_SIZE 320
-#define HINIC_RQ_WQE_SIZE 32
-
-#define HINIC_MSS_DEFAULT 0x3E00
-#define HINIC_MSS_MIN 0x50
-
-#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16
-#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU
-
-#define RQ_CQE_STATUS_GET(val, member) (((val) >> \
- RQ_CQE_STATUS_##member##_SHIFT) & \
- RQ_CQE_STATUS_##member##_MASK)
-
-#define HINIC_GET_RX_NUM_LRO(status) \
- RQ_CQE_STATUS_GET(status, NUM_LRO)
-
-#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0
-#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0xFFFU
-#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21
-#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U
-
-#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) (((val) >> \
- RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \
- RQ_CQE_OFFOLAD_TYPE_##member##_MASK)
-
-#define HINIC_GET_RX_PKT_TYPE(offload_type) \
- RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)
-
-#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \
- RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN)
-
-#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU
-#define RQ_CQE_SGE_VLAN_SHIFT 0
-
-#define RQ_CQE_SGE_GET(val, member) (((val) >> \
- RQ_CQE_SGE_##member##_SHIFT) & \
- RQ_CQE_SGE_##member##_MASK)
-
-#define HINIC_GET_RX_VLAN_TAG(vlan_len) \
- RQ_CQE_SGE_GET(vlan_len, VLAN)
-
-#define HINIC_RSS_TYPE_VALID_SHIFT 23
-#define HINIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24
-#define HINIC_RSS_TYPE_IPV6_EXT_SHIFT 25
-#define HINIC_RSS_TYPE_TCP_IPV6_SHIFT 26
-#define HINIC_RSS_TYPE_IPV6_SHIFT 27
-#define HINIC_RSS_TYPE_TCP_IPV4_SHIFT 28
-#define HINIC_RSS_TYPE_IPV4_SHIFT 29
-#define HINIC_RSS_TYPE_UDP_IPV6_SHIFT 30
-#define HINIC_RSS_TYPE_UDP_IPV4_SHIFT 31
-
-#define HINIC_RSS_TYPE_SET(val, member) \
- (((u32)(val) & 0x1) << HINIC_RSS_TYPE_##member##_SHIFT)
-
-#define HINIC_RSS_TYPE_GET(val, member) \
- (((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
-
-enum hinic_l3_offload_type {
- L3TYPE_UNKNOWN = 0,
- IPV6_PKT = 1,
- IPV4_PKT_NO_CHKSUM_OFFLOAD = 2,
- IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3,
-};
-
-enum hinic_l4_offload_type {
- OFFLOAD_DISABLE = 0,
- TCP_OFFLOAD_ENABLE = 1,
- SCTP_OFFLOAD_ENABLE = 2,
- UDP_OFFLOAD_ENABLE = 3,
-};
-
-enum hinic_l4_tunnel_type {
- NOT_TUNNEL,
- TUNNEL_UDP_NO_CSUM,
- TUNNEL_UDP_CSUM,
-};
-
-enum hinic_outer_l3type {
- HINIC_OUTER_L3TYPE_UNKNOWN = 0,
- HINIC_OUTER_L3TYPE_IPV6 = 1,
- HINIC_OUTER_L3TYPE_IPV4_NO_CHKSUM = 2,
- HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3,
-};
-
-enum hinic_l2type {
- HINIC_L2TYPE_ETH = 0,
-};
-
-struct hinic_cmdq_header {
- u32 header_info;
- u32 saved_data;
-};
-
-struct hinic_status {
- u32 status_info;
-};
-
-struct hinic_ctrl {
- u32 ctrl_info;
-};
-
-struct hinic_sge_resp {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_cmdq_completion {
- /* HW Format */
- union {
- struct hinic_sge_resp sge_resp;
- u64 direct_resp;
- };
-};
-
-struct hinic_scmd_bufdesc {
- u32 buf_len;
- u32 rsvd;
- u8 data[HINIC_SCMD_DATA_LEN];
-};
-
-struct hinic_lcmd_bufdesc {
- struct hinic_sge sge;
- u32 rsvd1;
- u64 rsvd2;
- u64 rsvd3;
-};
-
-struct hinic_cmdq_wqe_scmd {
- struct hinic_cmdq_header header;
- u64 rsvd;
- struct hinic_status status;
- struct hinic_ctrl ctrl;
- struct hinic_cmdq_completion completion;
- struct hinic_scmd_bufdesc buf_desc;
-};
-
-struct hinic_cmdq_wqe_lcmd {
- struct hinic_cmdq_header header;
- struct hinic_status status;
- struct hinic_ctrl ctrl;
- struct hinic_cmdq_completion completion;
- struct hinic_lcmd_bufdesc buf_desc;
-};
-
-struct hinic_cmdq_direct_wqe {
- struct hinic_cmdq_wqe_scmd wqe_scmd;
-};
-
-struct hinic_cmdq_wqe {
- /* HW Format */
- union {
- struct hinic_cmdq_direct_wqe direct_wqe;
- struct hinic_cmdq_wqe_lcmd wqe_lcmd;
- };
-};
-
-struct hinic_sq_ctrl {
- u32 ctrl_info;
- u32 queue_info;
-};
-
-struct hinic_sq_task {
- u32 pkt_info0;
- u32 pkt_info1;
- u32 pkt_info2;
- u32 ufo_v6_identify;
- u32 pkt_info4;
- u32 zero_pad;
-};
-
-struct hinic_sq_bufdesc {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_sq_wqe {
- struct hinic_sq_ctrl ctrl;
- struct hinic_sq_task task;
- struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS];
-};
-
-struct hinic_rq_cqe {
- u32 status;
- u32 len;
-
- u32 offload_type;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
- u32 rsvd6;
- u32 rsvd7;
-};
-
-struct hinic_rq_ctrl {
- u32 ctrl_info;
-};
-
-struct hinic_rq_cqe_sect {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_rq_bufdesc {
- u32 hi_addr;
- u32 lo_addr;
-};
-
-struct hinic_rq_wqe {
- struct hinic_rq_ctrl ctrl;
- u32 rsvd;
- struct hinic_rq_cqe_sect cqe_sect;
- struct hinic_rq_bufdesc buf_desc;
-};
-
-struct hinic_hw_wqe {
- /* HW Format */
- union {
- struct hinic_cmdq_wqe cmdq_wqe;
- struct hinic_sq_wqe sq_wqe;
- struct hinic_rq_wqe rq_wqe;
- };
-};
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c
new file mode 100644
index 000000000000..a7e3d2cf691e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c
@@ -0,0 +1,5001 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_csr.h"
+#include "hinic_hwif.h"
+#include "hinic_msix_attr.h"
+#include "hinic_nic_io.h"
+#include "hinic_eqs.h"
+#include "hinic_api_cmd.h"
+#include "hinic_mgmt.h"
+#include "hinic_mbox.h"
+#include "hinic_wq.h"
+#include "hinic_cmdq.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_hwif.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_multi_host_mgmt.h"
+
+#define HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT 0
+#define HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF
+#define HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7
+
+#define HINIC_WAIT_IO_STATUS_TIMEOUT 100
+
+#define HINIC_FLR_TIMEOUT 1000
+
+#define HINIC_HT_GPA_PAGE_SIZE 4096UL
+
+#define HINIC_PPF_HT_GPA_SET_RETRY_TIMES 10
+
+#define HINIC_OK_FLAG_OK 0
+
+#define HINIC_OK_FLAG_FAILED 1
+
+#define HINIC_GET_SFP_INFO_REAL_TIME 0x1
+
+#define HINIC_GLB_SO_RO_CFG_SHIFT 0x0
+#define HINIC_GLB_SO_RO_CFG_MASK 0x1
+#define HINIC_DISABLE_ORDER 0
+#define HINIC_GLB_DMA_SO_RO_GET(val, member) \
+ (((val) >> HINIC_GLB_##member##_SHIFT) & HINIC_GLB_##member##_MASK)
+
+#define HINIC_GLB_DMA_SO_R0_CLEAR(val, member) \
+ ((val) & (~(HINIC_GLB_##member##_MASK << HINIC_GLB_##member##_SHIFT)))
+
+#define HINIC_GLB_DMA_SO_R0_SET(val, member) \
+ (((val) & HINIC_GLB_##member##_MASK) << HINIC_GLB_##member##_SHIFT)
+
+#define HINIC_MGMT_CHANNEL_STATUS_SHIFT 0x0
+#define HINIC_MGMT_CHANNEL_STATUS_MASK 0x1
+#define HINIC_ACTIVE_STATUS_MASK 0x80000000
+#define HINIC_ACTIVE_STATUS_CLEAR 0x7FFFFFFF
+#define HINIC_ACTIVE_UCODE 0x1F80 /* bit7~bit12 */
+
+#define HINIC_GET_MGMT_CHANNEL_STATUS(val, member) \
+ (((val) >> HINIC_##member##_SHIFT) & HINIC_##member##_MASK)
+
+#define HINIC_CLEAR_MGMT_CHANNEL_STATUS(val, member) \
+ ((val) & (~(HINIC_##member##_MASK << HINIC_##member##_SHIFT)))
+
+#define HINIC_SET_MGMT_CHANNEL_STATUS(val, member) \
+ (((val) & HINIC_##member##_MASK) << HINIC_##member##_SHIFT)
+
+#define HINIC_BOARD_IS_PHY(hwdev) \
+ ((hwdev)->board_info.board_type == 4 && \
+ (hwdev)->board_info.board_id == 24)
+
+struct comm_info_ht_gpa_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 rsvd1;
+ u32 rsvd2;
+
+ u64 page_pa0;
+ u64 page_pa1;
+};
+
+struct comm_info_eqm_fix {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 chunk_num;
+ u32 search_gpa_num;
+};
+
+struct comm_info_eqm_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 ppf_id;
+ u32 page_size;
+ u32 valid;
+};
+
+struct comm_info_eqm_search_gpa {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 start_idx;
+ u32 num;
+ u32 resv0;
+ u32 resv1;
+ u64 gpa_hi52[0]; /*lint !e1501 */
+};
+
+struct hinic_cons_idx_attr {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u32 sq_id;
+ u64 ci_addr;
+};
+
+struct hinic_clear_doorbell {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 rsvd1;
+};
+
+struct hinic_clear_resource {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 rsvd1;
+};
+
+struct hinic_msix_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 msix_index;
+ u8 pending_cnt;
+ u8 coalesct_timer_cnt;
+ u8 lli_tmier_cnt;
+ u8 lli_credit_cnt;
+ u8 resend_timer_cnt;
+ u8 rsvd1[3];
+};
+
+enum func_tmr_bitmap_status {
+ FUNC_TMR_BITMAP_DISABLE,
+ FUNC_TMR_BITMAP_ENABLE,
+};
+
+struct hinic_func_tmr_bitmap_op {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 op_id; /* 0:start; 1:stop */
+ u8 ppf_idx;
+ u32 rsvd1;
+};
+
+struct hinic_ppf_tmr_op {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 ppf_idx;
+	u8 op_id;	/* 0: stop timer; 1: start timer */
+ u8 rsvd1[2];
+ u32 rsvd2;
+};
+
+struct hinic_cmd_set_res_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 state;
+ u8 rsvd1;
+ u32 rsvd2;
+};
+
+int hinic_hw_rx_buf_size[] = {
+ HINIC_RX_BUF_SIZE_32B,
+ HINIC_RX_BUF_SIZE_64B,
+ HINIC_RX_BUF_SIZE_96B,
+ HINIC_RX_BUF_SIZE_128B,
+ HINIC_RX_BUF_SIZE_192B,
+ HINIC_RX_BUF_SIZE_256B,
+ HINIC_RX_BUF_SIZE_384B,
+ HINIC_RX_BUF_SIZE_512B,
+ HINIC_RX_BUF_SIZE_768B,
+ HINIC_RX_BUF_SIZE_1K,
+ HINIC_RX_BUF_SIZE_1_5K,
+ HINIC_RX_BUF_SIZE_2K,
+ HINIC_RX_BUF_SIZE_3K,
+ HINIC_RX_BUF_SIZE_4K,
+ HINIC_RX_BUF_SIZE_8K,
+ HINIC_RX_BUF_SIZE_16K,
+};
+
+/* vf-pf dma attr table */
+struct hinic_vf_dma_attr_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 func_dma_entry_num;
+ u8 entry_idx;
+ u8 st;
+ u8 at;
+ u8 ph;
+ u8 no_snooping;
+ u8 tph_en;
+ u8 resv1[3];
+};
+
+struct hinic_led_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port;
+ u8 type;
+ u8 mode;
+ u8 reset;
+};
+
+struct hinic_comm_board_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_board_info info;
+
+ u32 rsvd1[4];
+};
+
+#define PHY_DOING_INIT_TIMEOUT (15 * 1000)
+
+struct hinic_phy_init_status {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 init_status;
+ u8 rsvd1[3];
+};
+
+enum phy_init_status_type {
+ PHY_INIT_DOING = 0,
+ PHY_INIT_SUCCESS = 1,
+ PHY_INIT_FAIL = 2,
+ PHY_NONSUPPORT = 3,
+};
+
+struct hinic_update_active {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 update_flag;
+ u32 update_status;
+};
+
+enum hinic_bios_cfg_op_code {
+ HINIC_BIOS_CFG_GET = 0,
+ HINIC_BIOS_CFG_PF_BW_LIMIT = 0x1 << 6,
+};
+
+struct hinic_bios_cfg_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 op_code;
+ u32 signature;
+
+ u8 rsvd1[12];
+ u32 pf_bw_limit;
+ u8 rsvd2[5];
+
+ u8 func_valid;
+ u8 func_idx;
+ u8 rsvd3;
+};
+
+struct hinic_mgmt_watchdog_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 curr_time_h;
+ u32 curr_time_l;
+ u32 task_id;
+ u32 rsv;
+
+ u32 reg[13];
+ u32 pc;
+ u32 lr;
+ u32 cpsr;
+
+ u32 stack_top;
+ u32 stack_bottom;
+ u32 sp;
+ u32 curr_used;
+ u32 peak_used;
+ u32 is_overflow;
+
+ u32 stack_actlen;
+ u8 data[1024];
+};
+
+struct hinic_fmw_act_ntc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 rsvd1[5];
+};
+
+struct hinic_ppf_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+ u8 ppf_state;
+ u8 rsvd1[3];
+};
+
+#define HINIC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12)))
+
+struct hinic_wq_page_size {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */
+ u8 page_size;
+
+ u32 rsvd1;
+};
+
+#define MAX_PCIE_DFX_BUF_SIZE 1024
+
+struct hinic_pcie_dfx_ntc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ int len;
+ u32 rsvd;
+};
+
+struct hinic_pcie_dfx_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 host_id;
+ u8 last;
+ u8 rsvd[2];
+ u32 offset;
+
+ u8 data[MAX_PCIE_DFX_BUF_SIZE];
+};
+
+struct hinic_hw_pf_infos_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_hw_pf_infos infos;
+};
+
+enum hinic_sdi_mode_ops {
+	HINIC_SDI_INFO_SET =	1U << 0,	/* 1 - write (save) configuration, 0 - read */
+ HINIC_SDI_INFO_MODE = 1U << 1,
+};
+
+struct hinic_sdi_mode_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ /* Op-Code:
+ * Bit0: 0 - read configuration, 1 - write configuration
+ * Bit1: 0 - ignored, 1 - get/set SDI Mode
+ */
+ u32 opcode;
+ u32 signature;
+ u16 cur_sdi_mode;
+ u16 cfg_sdi_mode;
+
+ u32 rsvd1[29];
+};
+
+struct hinic_reg_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u32 reg_addr;
+ u32 val_length;
+
+ u32 data[2];
+};
+
+#define HINIC_DMA_ATTR_ENTRY_ST_SHIFT 0
+#define HINIC_DMA_ATTR_ENTRY_AT_SHIFT 8
+#define HINIC_DMA_ATTR_ENTRY_PH_SHIFT 10
+#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12
+#define HINIC_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13
+
+#define HINIC_DMA_ATTR_ENTRY_ST_MASK 0xFF
+#define HINIC_DMA_ATTR_ENTRY_AT_MASK 0x3
+#define HINIC_DMA_ATTR_ENTRY_PH_MASK 0x3
+#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1
+#define HINIC_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1
+
+#define HINIC_DMA_ATTR_ENTRY_SET(val, member) \
+ (((u32)(val) & HINIC_DMA_ATTR_ENTRY_##member##_MASK) << \
+ HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)
+
+#define HINIC_DMA_ATTR_ENTRY_CLEAR(val, member) \
+ ((val) & (~(HINIC_DMA_ATTR_ENTRY_##member##_MASK \
+ << HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)))
+
+#define HINIC_PCIE_ST_DISABLE 0
+#define HINIC_PCIE_AT_DISABLE 0
+#define HINIC_PCIE_PH_DISABLE 0
+
+#define PCIE_MSIX_ATTR_ENTRY 0
+
+struct hinic_cmd_fault_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_fault_event event;
+};
+
+static void hinic_enable_mgmt_channel(void *hwdev, void *buf_out);
+static void hinic_set_mgmt_channel_status(void *handle, bool state);
+
+#define HINIC_QUEUE_MIN_DEPTH 6
+#define HINIC_QUEUE_MAX_DEPTH 12
+#define HINIC_MAX_RX_BUFFER_SIZE 15
+
+#define CAP_INFO_MAC_LEN 512
+#define VENDOR_MAX_LEN 17
+
+static bool check_root_ctxt(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_root_ctxt *root_ctxt;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ root_ctxt = (struct hinic_root_ctxt *)buf_in;
+
+ if (root_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
+ return false;
+
+ if (root_ctxt->set_cmdq_depth) {
+ if (root_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH &&
+ root_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH)
+ return true;
+
+ return false;
+ }
+
+ if (root_ctxt->rq_depth >= HINIC_QUEUE_MIN_DEPTH &&
+ root_ctxt->rq_depth <= HINIC_QUEUE_MAX_DEPTH &&
+ root_ctxt->sq_depth >= HINIC_QUEUE_MIN_DEPTH &&
+ root_ctxt->sq_depth <= HINIC_QUEUE_MAX_DEPTH &&
+ root_ctxt->rx_buf_sz <= HINIC_MAX_RX_BUFFER_SIZE)
+ return true;
+
+ if (!root_ctxt->rq_depth && !root_ctxt->sq_depth &&
+ !root_ctxt->rx_buf_sz)
+ return true;
+
+ return false;
+}
+
+static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ return hinic_cmdq_check_vf_ctxt(hwdev, buf_in);
+}
+
+static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_wq_page_size *page_size_info = buf_in;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ if (page_size_info->ppf_idx != hinic_ppf_idx(hwdev))
+ return false;
+
+ if (((1U << page_size_info->page_size) * 0x1000) !=
+ HINIC_DEFAULT_WQ_PAGE_SIZE)
+ return false;
+
+ return true;
+}
+
+static bool __mbox_check_tmr_bitmap(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ struct hinic_func_tmr_bitmap_op *bitmap_op = buf_in;
+
+ if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+ return false;
+
+ if (bitmap_op->op_id == FUNC_TMR_BITMAP_ENABLE) {
+ if (!hinic_get_ppf_status(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "PPF timer is not init, can't enable %d timer bitmap\n",
+ func_idx);
+ return false;
+ }
+ }
+
+ if (bitmap_op->ppf_idx != hinic_ppf_idx(hwdev))
+ return false;
+
+ return true;
+}
+
+/* Management commands a VF may send through the PF mailbox channel, each
+ * paired with an optional request-validation callback (NULL means the
+ * command id alone is sufficient, no payload check is performed).
+ */
+struct vf_cmd_check_handle hw_cmd_support_vf[] = {
+	{HINIC_MGMT_CMD_START_FLR, hinic_mbox_check_func_id_8B},
+	{HINIC_MGMT_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B},
+	{HINIC_MGMT_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt},
+	{HINIC_MGMT_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt},
+	{HINIC_MGMT_CMD_VAT_SET, check_root_ctxt},
+	{HINIC_MGMT_CMD_VAT_GET, check_root_ctxt},
+	{HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET, hinic_mbox_check_func_id_8B},
+	{HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_GET, hinic_mbox_check_func_id_8B},
+	{HINIC_MGMT_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B},
+
+	{HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
+	{HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
+	{HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B},
+
+	{HINIC_MGMT_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B},
+	{HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET, hinic_mbox_check_func_id_8B},
+	{HINIC_MGMT_CMD_PAGESIZE_SET, check_set_wq_page_size},
+	{HINIC_MGMT_CMD_PAGESIZE_GET, hinic_mbox_check_func_id_8B},
+	{HINIC_MGMT_CMD_GET_PPF_STATE, NULL},
+	{HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET, __mbox_check_tmr_bitmap},
+	{HINIC_MGMT_CMD_GET_BOARD_INFO, NULL},
+	{HINIC_MGMT_CMD_GET_SDI_MODE, NULL},
+};
+
+/* Maps a firmware management status byte to a human-readable description
+ * used in the failure logs below.
+ */
+struct hinic_mgmt_status_log {
+	u8 status;
+	const char *log;
+};
+
+static struct hinic_mgmt_status_log mgmt_status_log[] = {
+	{HINIC_MGMT_STATUS_ERR_PARAM, "Invalid parameter"},
+	{HINIC_MGMT_STATUS_ERR_FAILED, "Operation failed"},
+	{HINIC_MGMT_STATUS_ERR_PORT, "Invalid port"},
+	{HINIC_MGMT_STATUS_ERR_TIMEOUT, "Operation time out"},
+	{HINIC_MGMT_STATUS_ERR_NOMATCH, "Version not match"},
+	{HINIC_MGMT_STATUS_ERR_EXIST, "Entry exists"},
+	{HINIC_MGMT_STATUS_ERR_NOMEM, "Out of memory"},
+	{HINIC_MGMT_STATUS_ERR_INIT, "Feature not initialized"},
+	{HINIC_MGMT_STATUS_ERR_FAULT, "Invalid address"},
+	{HINIC_MGMT_STATUS_ERR_PERM, "Operation not permitted"},
+	{HINIC_MGMT_STATUS_ERR_EMPTY, "Table empty"},
+	{HINIC_MGMT_STATUS_ERR_FULL, "Table full"},
+	{HINIC_MGMT_STATUS_ERR_NOT_FOUND, "Not found"},
+	{HINIC_MGMT_STATUS_ERR_BUSY, "Device or resource busy "},
+	{HINIC_MGMT_STATUS_ERR_RESOURCE, "No resources for operation "},
+	{HINIC_MGMT_STATUS_ERR_CONFIG, "Invalid configuration"},
+	{HINIC_MGMT_STATUS_ERR_UNAVAIL, "Feature unavailable"},
+	{HINIC_MGMT_STATUS_ERR_CRC, "CRC check failed"},
+	{HINIC_MGMT_STATUS_ERR_NXIO, "No such device or address"},
+	{HINIC_MGMT_STATUS_ERR_ROLLBACK, "Chip rollback fail"},
+	{HINIC_MGMT_STATUS_ERR_LEN, "Length too short or too long"},
+	{HINIC_MGMT_STATUS_ERR_UNSUPPORT, "Feature not supported"},
+};
+
+/* Log a decoded mgmt failure for COMM/L2NIC/HILINK modules. For a VF whose
+ * MAC is administered by the PF, HINIC_PF_SET_VF_ALREADY on MAC commands is
+ * expected and intentionally not logged.
+ */
+static void __print_status_info(struct hinic_hwdev *dev,
+				enum hinic_mod_type mod, u8 cmd, int index)
+{
+	bool mac_cmd;
+
+	if (mod == HINIC_MOD_COMM) {
+		sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s",
+			mod, cmd, mgmt_status_log[index].log);
+	} else if (mod == HINIC_MOD_L2NIC || mod == HINIC_MOD_HILINK) {
+		mac_cmd = cmd == HINIC_PORT_CMD_SET_MAC ||
+			  cmd == HINIC_PORT_CMD_DEL_MAC ||
+			  cmd == HINIC_PORT_CMD_UPDATE_MAC;
+
+		/* PF already owns the VF mac: silently ignore on the VF */
+		if (HINIC_IS_VF(dev) && mac_cmd &&
+		    mgmt_status_log[index].status == HINIC_PF_SET_VF_ALREADY)
+			return;
+
+		nic_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s",
+			mod, cmd, mgmt_status_log[index].log);
+	}
+}
+
+/* Decide whether a non-zero mgmt status is an expected condition that the
+ * generic error path should skip: missing optical module on SFP queries,
+ * already-existing MAC entries, or a command the firmware doesn't support
+ * (which only warrants a warning).
+ */
+static bool hinic_status_need_special_handle(struct hinic_hwdev *dev,
+					     enum hinic_mod_type mod,
+					     u8 cmd, u8 status)
+{
+	if (mod == HINIC_MOD_L2NIC) {
+		/* optical module isn't plugged in */
+		if ((cmd == HINIC_PORT_CMD_GET_STD_SFP_INFO ||
+		     cmd == HINIC_PORT_CMD_GET_SFP_INFO) &&
+		    status == HINIC_MGMT_STATUS_ERR_NXIO)
+			return true;
+
+		if ((cmd == HINIC_PORT_CMD_SET_MAC ||
+		     cmd == HINIC_PORT_CMD_UPDATE_MAC) &&
+		    status == HINIC_MGMT_STATUS_ERR_EXIST)
+			return true;
+	}
+
+	if (status == HINIC_MGMT_STATUS_ERR_UNSUPPORT) {
+		/* the former if/else here printed identical messages for the
+		 * L2NIC and non-L2NIC cases; collapsed into one warning
+		 */
+		sdk_warn(dev->dev_hdl, "Mgmt command: mod(0x%x) cmd(0x%x) not supported\n",
+			 mod, cmd);
+
+		return true;
+	}
+
+	return false;
+}
+
+/* Inspect the first byte of a mgmt response (the status) and log a decoded
+ * error message for COMM/L2NIC/HILINK commands; expected "errors" (see
+ * hinic_status_need_special_handle()) are skipped. Unknown codes get a
+ * generic message.
+ */
+static void hinic_print_status_info(void *hwdev, enum hinic_mod_type mod,
+				    u8 cmd, const void *buf_out)
+{
+	struct hinic_hwdev *dev = hwdev;
+	int i, size;
+	u8 status;
+
+	if (!buf_out)
+		return;
+
+	/* only these modules place a status byte first in the response */
+	if (mod != HINIC_MOD_COMM && mod != HINIC_MOD_L2NIC &&
+	    mod != HINIC_MOD_HILINK)
+		return;
+
+	status = *(u8 *)buf_out;
+
+	if (!status)
+		return;
+
+	if (hinic_status_need_special_handle(dev, mod, cmd, status))
+		return;
+
+	size = ARRAY_SIZE(mgmt_status_log);
+	for (i = 0; i < size; i++) {
+		if (status == mgmt_status_log[i].status) {
+			__print_status_info(dev, mod, cmd, i);
+			return;
+		}
+	}
+
+	/* status code not in the table: report it raw */
+	if (mod == HINIC_MOD_COMM) {
+		sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n",
+			mod, cmd, status);
+	} else if (mod == HINIC_MOD_L2NIC || mod == HINIC_MOD_HILINK) {
+		nic_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n",
+			mod, cmd, status);
+	}
+}
+
+/* Mark the chip as present (reachable over PCIe) */
+void hinic_set_chip_present(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	dev->chip_present_flag = HINIC_CHIP_PRESENT;
+}
+
+/* Mark the chip as absent and log it; set when the PCIe link is detected
+ * down (see hinic_detect_hw_present()).
+ */
+void hinic_set_chip_absent(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	sdk_err(dev->dev_hdl, "Card not present\n");
+	dev->chip_present_flag = HINIC_CHIP_ABSENT;
+}
+
+/* Return the chip presence flag, or -EINVAL for a NULL device */
+int hinic_get_chip_present_flag(void *hwdev)
+{
+	if (!hwdev)
+		return -EINVAL;
+
+	return ((struct hinic_hwdev *)hwdev)->chip_present_flag;
+}
+EXPORT_SYMBOL(hinic_get_chip_present_flag);
+
+/* Wake every waiter that could otherwise block device removal: complete a
+ * pending PF-to-mgmt sync request (marking it timed out) and flush the sync
+ * cmdq. Used when the chip disappears or on teardown.
+ */
+void hinic_force_complete_all(void *hwdev)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+	struct hinic_recv_msg *recv_resp_msg;
+
+	set_bit(HINIC_HWDEV_STATE_BUSY, &dev->func_state);
+
+	if (hinic_func_type(dev) != TYPE_VF &&
+	    hinic_is_hwdev_mod_inited(dev, HINIC_HWDEV_MGMT_INITED)) {
+		recv_resp_msg = &dev->pf_to_mgmt->recv_resp_msg_from_mgmt;
+		/* event_flag is protected by sync_event_lock; only a request
+		 * still in SEND_EVENT_START state is force-completed
+		 */
+		spin_lock_bh(&dev->pf_to_mgmt->sync_event_lock);
+		if (dev->pf_to_mgmt->event_flag == SEND_EVENT_START) {
+			complete(&recv_resp_msg->recv_done);
+			dev->pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT;
+		}
+		spin_unlock_bh(&dev->pf_to_mgmt->sync_event_lock);
+	}
+
+	/* only flush sync cmdq to avoid blocking remove */
+	if (hinic_is_hwdev_mod_inited(dev, HINIC_HWDEV_CMDQ_INITED))
+		hinic_cmdq_flush_cmd(hwdev,
+				     &dev->cmdqs->cmdq[HINIC_CMDQ_SYNC]);
+
+	clear_bit(HINIC_HWDEV_STATE_BUSY, &dev->func_state);
+}
+
+/* Probe chip liveness by reading attribute register 1; an all-ones read
+ * (HINIC_PCIE_LINK_DOWN) means the PCIe link is gone, so mark the chip
+ * absent and release all waiters.
+ */
+void hinic_detect_hw_present(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+	u32 attr1;
+
+	attr1 = hinic_hwif_read_reg(dev->hwif, HINIC_CSR_FUNC_ATTR1_ADDR);
+	if (attr1 == HINIC_PCIE_LINK_DOWN) {
+		hinic_set_chip_absent(hwdev);
+		hinic_force_complete_all(hwdev);
+	}
+}
+
+/* Bump the PCIe fault counter in the device's hardware statistics */
+void hinic_record_pcie_error(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	if (!dev)
+		return;
+
+	atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats);
+}
+
+/* Route a synchronous mailbox message: a VF sends to its PF, a function in
+ * forward mode relays to the host; anything else is a caller error.
+ */
+static int __func_send_mbox(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+			    u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+			    u16 *out_size, u32 timeout)
+{
+	if (hinic_func_type(hwdev) == TYPE_VF)
+		return hinic_mbox_to_pf(hwdev, mod, cmd, buf_in,
+					in_size, buf_out,
+					out_size, timeout);
+
+	if (NEED_MBOX_FORWARD(hwdev))
+		return hinic_mbox_to_host_sync(hwdev, mod, cmd, buf_in,
+					       in_size, buf_out, out_size,
+					       timeout);
+
+	return -EFAULT;
+}
+
+/* Gate PF-to-mgmt traffic around firmware hot activation. While the mgmt
+ * channel is marked busy, selected modules get the distinguishable
+ * HINIC_DEV_BUSY_ACTIVE_FW code (callers retry), others get -EBUSY. A
+ * full ACTIVATE_FW request (anything beyond a ucode-only update) closes
+ * the channel before being sent.
+ */
+static int __pf_to_mgmt_pre_handle(struct hinic_hwdev *hwdev,
+				   enum hinic_mod_type mod, u8 cmd,
+				   void *buf_in)
+{
+	/* only meaningful for ACTIVATE_FW; other commands never read it */
+	struct hinic_update_active *active_info = buf_in;
+
+	if (hinic_get_mgmt_channel_status(hwdev)) {
+		if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC ||
+		    mod == HINIC_MOD_CFGM || mod == HINIC_MOD_HILINK)
+			return HINIC_DEV_BUSY_ACTIVE_FW;
+		else
+			return -EBUSY;
+	}
+
+	/* When only hot activation of ucode, mgmt channel can still be used
+	 * normally, otherwise it is not allowed to send commands to mgmt until
+	 * the hot activation is completed
+	 */
+	if (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_ACTIVATE_FW &&
+	    (active_info->update_flag & ~HINIC_ACTIVE_UCODE)) {
+		hinic_set_mgmt_channel_status(hwdev, true);
+
+		/* Sleep 2s wait other pf's mgmt messages to complete */
+		msleep(2000);
+	}
+
+	return 0;
+}
+
+/* Post-processing for ACTIVATE_FW only: reopen the mgmt channel right away
+ * if the send itself failed, otherwise let the mgmt reply decide when the
+ * channel may be re-enabled.
+ */
+static void __pf_to_mgmt_after_handle(struct hinic_hwdev *hwdev,
+				      enum hinic_mod_type mod, u8 cmd,
+				      int sw_status, void *mgmt_status)
+{
+	if (mod != HINIC_MOD_COMM || cmd != HINIC_MGMT_CMD_ACTIVATE_FW)
+		return;
+
+	/* if activate fw is failed, set channel valid */
+	if (sw_status)
+		hinic_set_mgmt_channel_status(hwdev, false);
+	else
+		hinic_enable_mgmt_channel(hwdev, mgmt_status);
+}
+
+/* Send a synchronous message from a PF to the management CPU, either over
+ * the mailbox (forward mode) or the direct mgmt channel with hot-activation
+ * pre/post handling. Returns 0 on success, -EPERM if the chip is absent or
+ * the required channel is not initialized, -EINVAL on bad arguments.
+ */
+int hinic_pf_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+			      void *buf_in, u16 in_size,
+			      void *buf_out, u16 *out_size, u32 timeout)
+{
+	struct hinic_hwdev *dev = hwdev;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!((struct hinic_hwdev *)hwdev)->chip_present_flag)
+		return -EPERM;
+
+	if (NEED_MBOX_FORWARD(dev)) {
+		if (!hinic_is_hwdev_mod_inited(hwdev,
+					       HINIC_HWDEV_MBOX_INITED)) {
+			return -EPERM;
+		}
+
+		err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size,
+				       buf_out, out_size, timeout);
+	} else {
+		if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED))
+			return -EPERM;
+
+		if (in_size > HINIC_MSG_TO_MGMT_MAX_LEN)
+			return -EINVAL;
+
+		/* may return busy while firmware hot activation is running */
+		err = __pf_to_mgmt_pre_handle(hwdev, mod, cmd, buf_in);
+		if (err)
+			return err;
+
+		err = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size,
+					    buf_out, out_size, timeout);
+		__pf_to_mgmt_after_handle(hwdev, mod, cmd, err, buf_out);
+	}
+
+	return err;
+}
+
+/* Serve a GET_SFP_INFO request from the per-port cache kept on the card
+ * node instead of querying firmware. Returns true if buf_out was filled
+ * from the cache; false means the caller must hit the firmware (invalid
+ * port, short output buffer, or an explicit real-time query).
+ */
+static bool is_sfp_info_cmd_cached(struct hinic_hwdev *hwdev,
+				   enum hinic_mod_type mod, u8 cmd,
+				   void *buf_in, u16 in_size,
+				   void *buf_out, u16 *out_size)
+{
+	struct hinic_cmd_get_sfp_qsfp_info *sfp_info = NULL;
+	struct hinic_port_routine_cmd *rt_cmd = NULL;
+	struct card_node *chip_node = hwdev->chip_node;
+
+	sfp_info = buf_in;
+	if (sfp_info->port_id >= HINIC_MAX_PORT_ID ||
+	    *out_size < sizeof(*sfp_info))
+		return false;
+
+	/* caller explicitly wants fresh data from the firmware */
+	if (sfp_info->version == HINIC_GET_SFP_INFO_REAL_TIME)
+		return false;
+
+	rt_cmd = &chip_node->rt_cmd[sfp_info->port_id];
+	/* sfp_mutex guards the cached routine-command data on the card node */
+	mutex_lock(&chip_node->sfp_mutex);
+	memcpy(buf_out, &rt_cmd->sfp_info, sizeof(*sfp_info));
+	mutex_unlock(&chip_node->sfp_mutex);
+
+	return true;
+}
+
+/* Serve a GET_SFP_ABS (module presence) request from the per-port cache.
+ * Same contract as is_sfp_info_cmd_cached(): true means buf_out was filled
+ * from the cache, false means fall through to the firmware.
+ */
+static bool is_sfp_abs_cmd_cached(struct hinic_hwdev *hwdev,
+				  enum hinic_mod_type mod, u8 cmd,
+				  void *buf_in, u16 in_size,
+				  void *buf_out, u16 *out_size)
+{
+	struct hinic_cmd_get_light_module_abs *abs = NULL;
+	struct hinic_port_routine_cmd *rt_cmd = NULL;
+	struct card_node *chip_node = hwdev->chip_node;
+
+	abs = buf_in;
+	if (abs->port_id >= HINIC_MAX_PORT_ID ||
+	    *out_size < sizeof(*abs))
+		return false;
+
+	/* caller explicitly wants fresh data from the firmware */
+	if (abs->version == HINIC_GET_SFP_INFO_REAL_TIME)
+		return false;
+
+	rt_cmd = &chip_node->rt_cmd[abs->port_id];
+	mutex_lock(&chip_node->sfp_mutex);
+	memcpy(buf_out, &rt_cmd->abs, sizeof(*abs));
+	mutex_unlock(&chip_node->sfp_mutex);
+
+	return true;
+}
+
+/* Check whether the driver can answer this mgmt command from its own cache
+ * (currently only SFP info/presence, and only once the firmware has pushed
+ * the data at least once). Returns true if buf_out has been filled and no
+ * firmware round trip is needed.
+ */
+static bool driver_processed_cmd(struct hinic_hwdev *hwdev,
+				 enum hinic_mod_type mod, u8 cmd,
+				 void *buf_in, u16 in_size,
+				 void *buf_out, u16 *out_size)
+{
+	struct card_node *chip_node = hwdev->chip_node;
+
+	if (mod == HINIC_MOD_L2NIC) {
+		if (cmd == HINIC_PORT_CMD_GET_SFP_INFO &&
+		    chip_node->rt_cmd->up_send_sfp_info) {
+			return is_sfp_info_cmd_cached(hwdev, mod, cmd, buf_in,
+						      in_size, buf_out,
+						      out_size);
+		} else if (cmd == HINIC_PORT_CMD_GET_SFP_ABS &&
+			   chip_node->rt_cmd->up_send_sfp_abs) {
+			return is_sfp_abs_cmd_cached(hwdev, mod, cmd, buf_in,
+						     in_size, buf_out,
+						     out_size);
+		}
+	}
+
+	return false;
+}
+
+/* Top-level synchronous message path to the management CPU for any function
+ * type. VFs (and forwarding functions) retry over the mailbox while the PF
+ * reports firmware activation in progress; PFs first try the driver cache,
+ * then wait for the mgmt channel to reopen before sending. Retries are
+ * bounded by HINIC_DEV_ACTIVE_FW_TIMEOUT. The response status byte is
+ * decoded and logged on the way out.
+ */
+int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+			   void *buf_in, u16 in_size,
+			   void *buf_out, u16 *out_size, u32 timeout)
+{
+	struct hinic_hwdev *dev = hwdev;
+	unsigned long end;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!(dev->chip_present_flag))
+		return -EPERM;
+
+	end = jiffies + msecs_to_jiffies(HINIC_DEV_ACTIVE_FW_TIMEOUT);
+	if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev)) {
+		if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MBOX_INITED))
+			return -EPERM;
+		do {
+			/* stop retrying if the chip vanished meanwhile */
+			if (!hinic_get_chip_present_flag(hwdev))
+				break;
+
+			err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size,
+					       buf_out, out_size, timeout);
+			if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) {
+				hinic_print_status_info(hwdev, mod, cmd,
+							buf_out);
+				return err;
+			}
+
+			msleep(1000);
+		} while (time_before(jiffies, end));
+
+		/* one last attempt after the activation window expired */
+		err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size,
+				       buf_out, out_size, timeout);
+	} else {
+		if (driver_processed_cmd(hwdev, mod, cmd, buf_in, in_size,
+					 buf_out, out_size))
+			return 0;
+
+		/* wait until the mgmt channel reopens or the chip disappears */
+		do {
+			if (!hinic_get_mgmt_channel_status(hwdev) ||
+			    !hinic_get_chip_present_flag(hwdev))
+				break;
+
+			msleep(1000);
+		} while (time_before(jiffies, end));
+		err = hinic_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in,
+						in_size, buf_out, out_size,
+						timeout);
+	}
+
+	hinic_print_status_info(hwdev, mod, cmd, buf_out);
+
+	return err;
+}
+EXPORT_SYMBOL(hinic_msg_to_mgmt_sync);
+
+/* PF/VF send msg to uP by api cmd, and return immediately */
+/* Fire-and-forget message to the management CPU. Only PFs may use this
+ * path: the mailbox has no async variant, so a VF gets -EFAULT. Fails with
+ * -EPERM if the chip is absent, mgmt is not initialized, or the channel is
+ * closed for firmware activation.
+ */
+int hinic_msg_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+			    void *buf_in, u16 in_size)
+{
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) ||
+	    !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) ||
+	    hinic_get_mgmt_channel_status(hwdev))
+		return -EPERM;
+
+	if (hinic_func_type(hwdev) == TYPE_VF) {
+		err = -EFAULT;
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Mailbox don't support async cmd\n");
+	} else {
+		err = hinic_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(hinic_msg_to_mgmt_async);
+
+/* Send a message to the management CPU without waiting for any response.
+ * VFs and forwarding functions use the no-ack mailbox variants; PFs go
+ * straight to the mgmt channel.
+ */
+int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+			     void *buf_in, u16 in_size)
+{
+	struct hinic_hwdev *dev = hwdev;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!(dev->chip_present_flag))
+		return -EPERM;
+
+	if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev)) {
+		if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MBOX_INITED))
+			return -EPERM;
+
+		if (hinic_func_type(hwdev) == TYPE_VF)
+			err = hinic_mbox_to_pf_no_ack(hwdev, mod, cmd, buf_in,
+						      in_size);
+		else
+			err = hinic_mbox_to_host_no_ack(hwdev, mod, cmd, buf_in,
+							in_size);
+	} else {
+		err = hinic_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size);
+	}
+
+	return err;
+}
+
+/* Send a synchronous mailbox message from the PF to one of its VFs. If the
+ * VF reports it no longer recognizes the destination (it is in an error
+ * state or was torn down), its message handler is unregistered so the PF
+ * stops talking to it.
+ */
+int hinic_mbox_to_vf(void *hwdev,
+		     enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = __hinic_mbox_to_vf(hwdev, mod, vf_id, cmd, buf_in, in_size,
+				 buf_out, out_size, timeout);
+	if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) {
+		/* VF already in error condiction */
+		sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, "VF%d not initialized, disconnect it\n",
+			 vf_id);
+		hinic_unregister_vf_msg_handler(hwdev, vf_id);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(hinic_mbox_to_vf);
+
+/* Send a message to the management CPU over the CLP channel. PF-only:
+ * VFs and forwarding functions get -EINVAL, and the CLP module must be
+ * initialized.
+ */
+int hinic_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+		      void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
+
+{
+	struct hinic_hwdev *dev = hwdev;
+	int err;
+
+	if (!dev)
+		return -EINVAL;
+
+	if (!dev->chip_present_flag)
+		return -EPERM;
+
+	if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev))
+		return -EINVAL;
+
+	if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CLP_INITED))
+		return -EPERM;
+
+	err = hinic_pf_clp_to_mgmt(dev, mod, cmd, buf_in,
+				   in_size, buf_out, out_size);
+
+	return err;
+}
+
+/**
+ * hinic_cpu_to_be32 - convert data to big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert, must be Multiple of 4B
+ */
+void hinic_cpu_to_be32(void *data, int len)
+{
+	u32 *word = data;
+	int i, nwords;
+
+	if (!data)
+		return;
+
+	/* operate on whole 32-bit words; any trailing bytes are ignored */
+	nwords = len / (int)sizeof(u32);
+	for (i = 0; i < nwords; i++)
+		word[i] = cpu_to_be32(word[i]);
+}
+EXPORT_SYMBOL(hinic_cpu_to_be32);
+
+/**
+ * hinic_be32_to_cpu - convert data from big endian 32 bit format
+ * @data: the data to convert
+ * @len: length of data to convert
+ */
+void hinic_be32_to_cpu(void *data, int len)
+{
+	u32 *word = data;
+	int i, nwords;
+
+	if (!data)
+		return;
+
+	/* operate on whole 32-bit words; any trailing bytes are ignored */
+	nwords = len / (int)sizeof(u32);
+	for (i = 0; i < nwords; i++)
+		word[i] = be32_to_cpu(word[i]);
+}
+EXPORT_SYMBOL(hinic_be32_to_cpu);
+
+/**
+ * hinic_set_sge - set dma area in scatter gather entry
+ * @sge: scatter gather entry
+ * @addr: dma address
+ * @len: length of relevant data in the dma address
+ */
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len)
+{
+	sge->len = len;
+	sge->lo_addr = lower_32_bits(addr);
+	sge->hi_addr = upper_32_bits(addr);
+}
+
+/**
+ * hinic_sge_to_dma - get dma address from scatter gather entry
+ * @sge: scatter gather entry
+ *
+ * Return dma address of sg entry
+ */
+dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge)
+{
+	u64 addr = ((u64)sge->hi_addr << 32) | sge->lo_addr;
+
+	return (dma_addr_t)addr;
+}
+
+/* Program the consumer-index attribute table for one send queue: DMA
+ * attributes, interrupt coalescing, optional interrupt binding and the
+ * DMA address firmware writes the CI back to. Returns 0 on success,
+ * -EFAULT if the firmware rejects the request.
+ */
+int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr)
+{
+	struct hinic_cons_idx_attr cons_idx_attr = {0};
+	u16 out_size = sizeof(cons_idx_attr);
+	int err;
+
+	if (!hwdev || !attr)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &cons_idx_attr.func_idx);
+	if (err)
+		return err;
+
+	cons_idx_attr.dma_attr_off = attr->dma_attr_off;
+	cons_idx_attr.pending_limit = attr->pending_limit;
+	cons_idx_attr.coalescing_time = attr->coalescing_time;
+
+	/* interrupt index is only meaningful when interrupts are enabled */
+	if (attr->intr_en) {
+		cons_idx_attr.intr_en = attr->intr_en;
+		cons_idx_attr.intr_idx = attr->intr_idx;
+	}
+
+	cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+	cons_idx_attr.sq_id = q_id;
+
+	cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET,
+				     &cons_idx_attr, sizeof(cons_idx_attr),
+				     &cons_idx_attr, &out_size, 0);
+	if (err || !out_size || cons_idx_attr.status) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, cons_idx_attr.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_set_ci_table);
+
+/* Tell firmware the command-queue depth via the VAT (root context) command.
+ * The depth is sent as log2, so cmdq_depth is expected to be a power of
+ * two. Returns 0 on success, -EFAULT if the firmware rejects the request.
+ */
+static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
+{
+	struct hinic_root_ctxt root_ctxt = {0};
+	u16 out_size = sizeof(root_ctxt);
+	int err;
+
+	err = hinic_global_func_id_get(hwdev, &root_ctxt.func_idx);
+	if (err)
+		return err;
+
+	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+
+	root_ctxt.set_cmdq_depth = 1;
+	root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_VAT_SET,
+				     &root_ctxt, sizeof(root_ctxt),
+				     &root_ctxt, &out_size, 0);
+	if (err || !out_size || root_ctxt.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, root_ctxt.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Translate an rx buffer size in bytes into the hardware's table index.
+ * Falls back to DEFAULT_RX_BUF_SIZE (the 2K entry) with an error log when
+ * the size is not one the chip supports.
+ */
+static u16 get_hw_rx_buf_size(int rx_buf_sz)
+{
+	/* use ARRAY_SIZE like the rest of this file instead of the
+	 * hand-rolled sizeof/sizeof expression
+	 */
+	u16 num_hw_types = ARRAY_SIZE(hinic_hw_rx_buf_size);
+	u16 i;
+
+	for (i = 0; i < num_hw_types; i++) {
+		if (hinic_hw_rx_buf_size[i] == rx_buf_sz)
+			return i;
+	}
+
+	pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz);
+
+	return DEFAULT_RX_BUF_SIZE;	/* default 2K */
+}
+
+/* Program the function's root context: RQ/SQ depths (sent as log2, so both
+ * must be powers of two), the hardware rx buffer size index, and LRO
+ * enabled. Cmdq depth fields are left untouched (set_cmdq_depth = 0).
+ */
+int hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz)
+{
+	struct hinic_root_ctxt root_ctxt = {0};
+	u16 out_size = sizeof(root_ctxt);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &root_ctxt.func_idx);
+	if (err)
+		return err;
+
+	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+
+	root_ctxt.set_cmdq_depth = 0;
+	root_ctxt.cmdq_depth = 0;
+
+	root_ctxt.lro_en = 1;
+
+	root_ctxt.rq_depth = (u16)ilog2(rq_depth);
+	root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz);
+	root_ctxt.sq_depth = (u16)ilog2(sq_depth);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_VAT_SET,
+				     &root_ctxt, sizeof(root_ctxt),
+				     &root_ctxt, &out_size, 0);
+	if (err || !out_size || root_ctxt.status) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, root_ctxt.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_set_root_ctxt);
+
+/* Reset the function's root context by sending an all-zero VAT_SET request
+ * (only func_idx and ppf_idx filled in). Used on teardown.
+ */
+int hinic_clean_root_ctxt(void *hwdev)
+{
+	struct hinic_root_ctxt root_ctxt = {0};
+	u16 out_size = sizeof(root_ctxt);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &root_ctxt.func_idx);
+	if (err)
+		return err;
+
+	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_VAT_SET,
+				     &root_ctxt, sizeof(root_ctxt),
+				     &root_ctxt, &out_size, 0);
+	if (err || !out_size || root_ctxt.status) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to clean root context, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, root_ctxt.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_clean_root_ctxt);
+
+/* Poll the PF status register every ~10ms until firmware reports FLR has
+ * finished, then mark the PF active again. Returns -EFAULT after
+ * HINIC_FLR_TIMEOUT polls without the finish flag.
+ */
+static int wait_for_flr_finish(struct hinic_hwif *hwif)
+{
+	u32 cnt;
+
+	for (cnt = 0; cnt < HINIC_FLR_TIMEOUT; cnt++) {
+		if (hinic_get_pf_status(hwif) ==
+		    HINIC_PF_STATUS_FLR_FINISH_FLAG) {
+			hinic_set_pf_status(hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
+			return 0;
+		}
+
+		usleep_range(9900, 10000);
+	}
+
+	return -EFAULT;
+}
+
+#define HINIC_WAIT_CMDQ_IDLE_TIMEOUT 5000
+
+/* Disable the command queues and wait (up to HINIC_WAIT_CMDQ_IDLE_TIMEOUT
+ * polls) for all of them to drain. On success the queues stay disabled; on
+ * timeout the busy queues are logged and HINIC_CMDQ_ENABLE is restored so
+ * the device keeps working. Returns 0 when idle, -EBUSY otherwise.
+ */
+static int wait_cmdq_stop(struct hinic_hwdev *hwdev)
+{
+	enum hinic_cmdq_type cmdq_type;
+	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+	u32 cnt = 0;
+	int err = 0;
+
+	if (!(cmdqs->status & HINIC_CMDQ_ENABLE))
+		return 0;
+
+	/* stop accepting new commands before draining */
+	cmdqs->status &= ~HINIC_CMDQ_ENABLE;
+
+	while (cnt < HINIC_WAIT_CMDQ_IDLE_TIMEOUT && hwdev->chip_present_flag) {
+		err = 0;
+		cmdq_type = HINIC_CMDQ_SYNC;
+		for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+			if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type])) {
+				err = -EBUSY;
+				break;
+			}
+		}
+
+		if (!err)
+			return 0;
+
+		usleep_range(500, 1000);
+		cnt++;
+	}
+
+	/* timed out: report which queues are still busy */
+	cmdq_type = HINIC_CMDQ_SYNC;
+	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+		if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+			sdk_err(hwdev->dev_hdl, "Cmdq %d busy\n", cmdq_type);
+	}
+
+	/* re-enable so the device isn't left with a dead cmdq path */
+	cmdqs->status |= HINIC_CMDQ_ENABLE;
+
+	return err;
+}
+
+/* Flush a VF's rx/tx resources: quiesce the cmdqs, ask the PF to start the
+ * FLR flow, wait for the PF to toggle the VF doorbell flush CSR
+ * (disabled then re-enabled once the flush completes), and finally
+ * re-initialize the cmdq contexts. Individual step failures are only
+ * warned about; the function always returns 0 once the func id is known.
+ */
+static int hinic_vf_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+	struct hinic_clear_resource clr_res = {0};
+	int err;
+
+	err = wait_cmdq_stop(hwdev);
+	if (err)
+		sdk_warn(hwdev->dev_hdl, "Cmdq is still working, please check CMDQ timeout value is reasonable\n");
+
+	err = hinic_global_func_id_get(hwdev, &clr_res.func_idx);
+	if (err)
+		return err;
+
+	clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+	err = hinic_mbox_to_pf_no_ack(hwdev, HINIC_MOD_COMM,
+				      HINIC_MGMT_CMD_START_FLR, &clr_res,
+				      sizeof(clr_res));
+	if (err)
+		sdk_warn(hwdev->dev_hdl, "Failed to notice flush message\n");
+
+	/* PF firstly set VF doorbell flush csr to be disabled. After PF finish
+	 * VF resources flush, PF will set VF doorbell flush csr to be enabled.
+	 */
+	err = wait_until_doorbell_flush_states(hwdev->hwif, DISABLE_DOORBELL);
+	if (err)
+		sdk_warn(hwdev->dev_hdl, "Wait doorbell flush disable timeout\n");
+	err = wait_until_doorbell_flush_states(hwdev->hwif, ENABLE_DOORBELL);
+	if (err)
+		sdk_warn(hwdev->dev_hdl, "Wait doorbell flush enable timeout\n");
+
+	err = hinic_reinit_cmdq_ctxts(hwdev);
+	if (err)
+		sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n");
+
+	return 0;
+}
+
+/* Read-modify-write the DOORBELL_CTRL field of a VF's flush CSR, leaving
+ * the remaining bits of the register untouched.
+ */
+static void hinic_pf_set_vf_db_flush(struct hinic_hwdev *hwdev, u16 vf_id,
+				     enum hinic_doorbell_ctrl val)
+{
+	u32 addr = HINIC_PF_CSR_VF_FLUSH_OFF(vf_id);
+	u32 attr4;
+
+	attr4 = hinic_hwif_read_reg(hwdev->hwif, addr);
+	attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL) |
+		HINIC_AF4_SET(val, DOORBELL_CTRL);
+	hinic_hwif_write_reg(hwdev->hwif, addr, attr4);
+}
+
+/* Flush one VF's rx/tx resources from the PF side: disable the VF's
+ * doorbell flush CSR, tell firmware to flush the doorbells, give ucode
+ * 100ms to stop I/O, start the FLR flow, then re-enable the CSR. Errors
+ * from each firmware step are accumulated into the return value but do
+ * not stop the remaining steps.
+ */
+static int hinic_vf_rx_tx_flush_in_pf(struct hinic_hwdev *hwdev, u16 vf_id)
+{
+	struct hinic_clear_doorbell clear_db = {0};
+	struct hinic_clear_resource clr_res = {0};
+	u16 glb_vf_func_id;
+	u16 out_size;
+	int err;
+	int ret = 0;
+
+	/* disable vf doorbell flush csr */
+	hinic_pf_set_vf_db_flush(hwdev, vf_id, DISABLE_DOORBELL);
+
+	/* doorbell flush */
+	out_size = sizeof(clear_db);
+	glb_vf_func_id = HINIC_HWIF_GLOBAL_VF_OFFSET(hwdev->hwif) + vf_id;
+	clear_db.func_idx = glb_vf_func_id;
+	clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
+				     sizeof(clear_db), &clear_db, &out_size, 0);
+	if (err || !out_size || clear_db.status) {
+		sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x\n",
+			 err, clear_db.status, out_size);
+		if (err)
+			ret = err;
+		else
+			ret = -EFAULT;
+	}
+
+	/* wait ucode stop I/O */
+	msleep(100);
+
+	/* notice up begine vf flush */
+	out_size = sizeof(clr_res);
+	clr_res.func_idx = glb_vf_func_id;
+	clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_START_FLR, &clr_res,
+				     sizeof(clr_res), &clr_res, &out_size, 0);
+	if (err || !out_size || clr_res.status) {
+		sdk_warn(hwdev->dev_hdl, "Failed to start flr, err: %d, status: 0x%x, out_size: 0x%x\n",
+			 err, clr_res.status, out_size);
+		ret = err ? err : (-EFAULT);
+	}
+	/* enable vf doorbell flush csr */
+	hinic_pf_set_vf_db_flush(hwdev, vf_id, ENABLE_DOORBELL);
+
+	return ret;
+}
+
+/* Flush the PF's own rx/tx resources: quiesce cmdqs, disable the doorbell
+ * area, flush doorbells via firmware, run the FLR handshake (set the
+ * start flag, notify firmware, poll for the finish flag), then re-enable
+ * doorbells and re-initialize the cmdq contexts. Step failures are
+ * accumulated into the return value; all steps are always attempted.
+ */
+static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+	struct hinic_clear_doorbell clear_db = {0};
+	struct hinic_clear_resource clr_res = {0};
+	u16 out_size, func_id;
+	int err;
+	int ret = 0;
+
+	/* wait ucode stop I/O */
+	msleep(100);
+
+	err = wait_cmdq_stop(hwdev);
+	if (err) {
+		sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check CMDQ timeout value is reasonable\n");
+		ret = err;
+	}
+
+	hinic_disable_doorbell(hwif);
+
+	out_size = sizeof(clear_db);
+	func_id = hinic_global_func_id_hw(hwdev);
+	clear_db.func_idx = func_id;
+	clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
+				     sizeof(clear_db), &clear_db, &out_size, 0);
+	if (err || !out_size || clear_db.status) {
+		sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x\n",
+			 err, clear_db.status, out_size);
+		ret = err ? err : (-EFAULT);
+	}
+
+	/* firmware flips this to FLR_FINISH when the flush completes */
+	hinic_set_pf_status(hwif, HINIC_PF_STATUS_FLR_START_FLAG);
+
+	clr_res.func_idx = func_id;
+	clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
+
+	err = hinic_msg_to_mgmt_no_ack(hwdev, HINIC_MOD_COMM,
+				       HINIC_MGMT_CMD_START_FLR, &clr_res,
+				       sizeof(clr_res));
+	if (err) {
+		sdk_warn(hwdev->dev_hdl, "Failed to notice flush message\n");
+		ret = err;
+	}
+
+	err = wait_for_flr_finish(hwif);
+	if (err) {
+		sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n");
+		ret = err;
+	}
+
+	hinic_enable_doorbell(hwif);
+
+	err = hinic_reinit_cmdq_ctxts(hwdev);
+	if (err) {
+		sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n");
+		ret = err;
+	}
+
+	return ret;
+}
+
+/* Flush the function's rx/tx resources, dispatching to the VF or PF flow.
+ * A missing chip is not an error: there is nothing left to flush.
+ */
+int hinic_func_rx_tx_flush(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	if (!dev)
+		return -EINVAL;
+
+	if (!dev->chip_present_flag)
+		return 0;
+
+	return (HINIC_FUNC_TYPE(dev) == TYPE_VF) ?
+		hinic_vf_rx_tx_flush(dev) : hinic_pf_rx_tx_flush(dev);
+}
+EXPORT_SYMBOL(hinic_func_rx_tx_flush);
+
+/* Read back the interrupt (MSI-X) configuration for the entry given by
+ * interrupt_info->msix_index and fill in the LLI and coalescing fields.
+ * Returns 0 on success, -EINVAL on bad arguments or firmware failure.
+ */
+int hinic_get_interrupt_cfg(void *hwdev,
+			    struct nic_interrupt_info *interrupt_info)
+{
+	struct hinic_hwdev *nic_hwdev = hwdev;
+	struct hinic_msix_config msix_cfg = {0};
+	u16 out_size = sizeof(msix_cfg);
+	int err;
+
+	if (!hwdev || !interrupt_info)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &msix_cfg.func_id);
+	if (err)
+		return err;
+
+	msix_cfg.msix_index = interrupt_info->msix_index;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
+				     &msix_cfg, sizeof(msix_cfg),
+				     &msix_cfg, &out_size, 0);
+	if (err || !out_size || msix_cfg.status) {
+		sdk_err(nic_hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, msix_cfg.status, out_size);
+		return -EINVAL;
+	}
+
+	interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt;
+	interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt;
+	interrupt_info->pending_limt = msix_cfg.pending_cnt;
+	interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt;
+	interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt;
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_get_interrupt_cfg);
+
+/* Write all interrupt (MSI-X) configuration fields of one entry exactly as
+ * given, without reading back current values first (contrast with
+ * hinic_set_interrupt_cfg(), which preserves unset fields).
+ */
+int hinic_set_interrupt_cfg_direct(void *hwdev,
+				   struct nic_interrupt_info *interrupt_info)
+{
+	struct hinic_hwdev *nic_hwdev = hwdev;
+	struct hinic_msix_config msix_cfg = {0};
+	u16 out_size = sizeof(msix_cfg);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &msix_cfg.func_id);
+	if (err)
+		return err;
+
+	msix_cfg.msix_index = (u16)interrupt_info->msix_index;
+	msix_cfg.lli_credit_cnt = interrupt_info->lli_credit_limit;
+	msix_cfg.lli_tmier_cnt = interrupt_info->lli_timer_cfg;
+	msix_cfg.pending_cnt = interrupt_info->pending_limt;
+	msix_cfg.coalesct_timer_cnt = interrupt_info->coalesc_timer_cfg;
+	msix_cfg.resend_timer_cnt = interrupt_info->resend_timer_cfg;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
+				     &msix_cfg, sizeof(msix_cfg),
+				     &msix_cfg, &out_size, 0);
+	if (err || !out_size || msix_cfg.status) {
+		sdk_err(nic_hwdev->dev_hdl, "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, msix_cfg.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Update one MSI-X entry's configuration, preserving whichever field group
+ * the caller did not mark as set (lli_set / interrupt_coalesc_set): the
+ * current values are read back first and merged into the request.
+ */
+int hinic_set_interrupt_cfg(void *hwdev,
+			    struct nic_interrupt_info interrupt_info)
+{
+	struct nic_interrupt_info temp_info;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	temp_info.msix_index = interrupt_info.msix_index;
+
+	/* fetch current values so unset field groups are preserved */
+	err = hinic_get_interrupt_cfg(hwdev, &temp_info);
+	if (err)
+		return -EINVAL;
+
+	if (!interrupt_info.lli_set) {
+		interrupt_info.lli_credit_limit = temp_info.lli_credit_limit;
+		interrupt_info.lli_timer_cfg = temp_info.lli_timer_cfg;
+	}
+
+	if (!interrupt_info.interrupt_coalesc_set) {
+		interrupt_info.pending_limt = temp_info.pending_limt;
+		interrupt_info.coalesc_timer_cfg = temp_info.coalesc_timer_cfg;
+		interrupt_info.resend_timer_cfg = temp_info.resend_timer_cfg;
+	}
+
+	return hinic_set_interrupt_cfg_direct(hwdev, &interrupt_info);
+}
+EXPORT_SYMBOL(hinic_set_interrupt_cfg);
+
+/* Write the resend-timer control bit of one MSI-X count register (no
+ * read-modify-write: the other fields of the register are written as 0).
+ */
+void hinic_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx,
+				      u8 clear_resend_en)
+{
+	struct hinic_hwdev *dev = hwdev;
+	u32 msix_ctrl;
+
+	if (!dev)
+		return;
+
+	msix_ctrl = HINIC_MSIX_CNT_SET(clear_resend_en, RESEND_TIMER);
+	hinic_hwif_write_reg(dev->hwif, HINIC_CSR_MSIX_CNT_ADDR(msix_idx),
+			     msix_ctrl);
+}
+EXPORT_SYMBOL(hinic_misx_intr_clear_resend_bit);
+
+/* Program default interrupt coalescing for every async event queue's MSI-X
+ * entry (iterating from the last AEQ down to 0) and switch the mailbox to
+ * interrupt-based segment acknowledgement.
+ */
+static int init_aeqs_msix_attr(struct hinic_hwdev *hwdev)
+{
+	struct hinic_aeqs *aeqs = hwdev->aeqs;
+	struct nic_interrupt_info info = {0};
+	struct hinic_eq *eq;
+	int q_id;
+	int err;
+
+	info.lli_set = 0;
+	info.interrupt_coalesc_set = 1;
+	info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT;
+	info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG;
+	info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+	for (q_id = aeqs->num_aeqs - 1; q_id >= 0; q_id--) {
+		eq = &aeqs->aeq[q_id];
+		info.msix_index = eq->eq_irq.msix_entry_idx;
+		err = hinic_set_interrupt_cfg_direct(hwdev, &info);
+		if (err) {
+			sdk_err(hwdev->dev_hdl, "Failed to set msix attr for aeq %d\n",
+				q_id);
+			return -EFAULT;
+		}
+	}
+
+	hinic_set_mbox_seg_ack_mod(hwdev, HINIC_MBOX_SEND_MSG_INT);
+
+	return 0;
+}
+
+/* Program default interrupt coalescing for every completion event queue's
+ * MSI-X entry. Uses the merging setter (hinic_set_interrupt_cfg) so the
+ * existing LLI settings of each entry are preserved.
+ */
+static int init_ceqs_msix_attr(struct hinic_hwdev *hwdev)
+{
+	struct hinic_ceqs *ceqs = hwdev->ceqs;
+	struct nic_interrupt_info info = {0};
+	struct hinic_eq *eq;
+	u16 q_id;
+	int err;
+
+	info.lli_set = 0;
+	info.interrupt_coalesc_set = 1;
+	info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT;
+	info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG;
+	info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+		eq = &ceqs->ceq[q_id];
+		info.msix_index = eq->eq_irq.msix_entry_idx;
+		err = hinic_set_interrupt_cfg(hwdev, info);
+		if (err) {
+			sdk_err(hwdev->dev_hdl, "Failed to set msix attr for ceq %d\n",
+				q_id);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * set_pf_dma_attr_entry - set the dma attributes for entry
+ * @hwdev: the pointer to hw device
+ * @entry_idx: the entry index in the dma table
+ * @st: PCIE TLP steering tag
+ * @at: PCIE TLP AT field
+ * @ph: PCIE TLP Processing Hint field
+ * @no_snooping: PCIE TLP No snooping
+ * @tph_en: PCIE TLP Processing Hint Enable
+ *
+ * PF variant: writes the DMA attribute table register directly instead of
+ * going through firmware (compare set_vf_dma_attr_entry()).
+ */
+static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx,
+				  u8 st, u8 at, u8 ph,
+				  enum hinic_pcie_nosnoop no_snooping,
+				  enum hinic_pcie_tph tph_en)
+{
+	u32 addr, val, dma_attr_entry;
+
+	/* Read Modify Write */
+	addr = HINIC_CSR_DMA_ATTR_TBL_ADDR(entry_idx);
+
+	val = hinic_hwif_read_reg(hwdev->hwif, addr);
+	/* clear all five fields, keep the remaining register bits */
+	val = HINIC_DMA_ATTR_ENTRY_CLEAR(val, ST)	&
+		HINIC_DMA_ATTR_ENTRY_CLEAR(val, AT)	&
+		HINIC_DMA_ATTR_ENTRY_CLEAR(val, PH)	&
+		HINIC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING)	&
+		HINIC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN);
+
+	dma_attr_entry = HINIC_DMA_ATTR_ENTRY_SET(st, ST)	|
+			 HINIC_DMA_ATTR_ENTRY_SET(at, AT)	|
+			 HINIC_DMA_ATTR_ENTRY_SET(ph, PH)	|
+			 HINIC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) |
+			 HINIC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN);
+
+	val |= dma_attr_entry;
+	hinic_hwif_write_reg(hwdev->hwif, addr, val);
+}
+
+/* VF cannot touch the DMA attribute CSR; request the change via mgmt cpu */
+static int set_vf_dma_attr_entry(struct hinic_hwdev *hwdev, u8 entry_idx,
+				 u8 st, u8 at, u8 ph,
+				 enum hinic_pcie_nosnoop no_snooping,
+				 enum hinic_pcie_tph tph_en)
+{
+	struct hinic_vf_dma_attr_table attr_tbl = {0};
+	u16 out_size = sizeof(attr_tbl);
+	int err;
+
+	err = hinic_global_func_id_get(hwdev, &attr_tbl.func_idx);
+	if (err)
+		return err;
+
+	attr_tbl.func_dma_entry_num = hinic_dma_attr_entry_num(hwdev);
+	attr_tbl.entry_idx = entry_idx;
+	attr_tbl.st = st;
+	attr_tbl.at = at;
+	attr_tbl.ph = ph;
+	attr_tbl.no_snooping = no_snooping;
+	attr_tbl.tph_en = tph_en;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_DMA_ATTR_SET, &attr_tbl,
+				     sizeof(attr_tbl), &attr_tbl, &out_size, 0);
+	if (err || !out_size || attr_tbl.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set dma attribute, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, attr_tbl.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * dma_attr_table_init - initialize the default dma attributes
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ *
+ * VF programs the MSIX attribute entry through the mgmt cpu and can
+ * fail; PF writes the CSR directly and always succeeds.
+ */
+static int dma_attr_table_init(struct hinic_hwdev *hwdev)
+{
+	if (HINIC_IS_VF(hwdev))
+		return set_vf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
+					     HINIC_PCIE_ST_DISABLE,
+					     HINIC_PCIE_AT_DISABLE,
+					     HINIC_PCIE_PH_DISABLE,
+					     HINIC_PCIE_SNOOP,
+					     HINIC_PCIE_TPH_DISABLE);
+
+	set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
+			      HINIC_PCIE_ST_DISABLE,
+			      HINIC_PCIE_AT_DISABLE,
+			      HINIC_PCIE_PH_DISABLE,
+			      HINIC_PCIE_SNOOP,
+			      HINIC_PCIE_TPH_DISABLE);
+
+	return 0;
+}
+
+/* Tell the management cpu that this function's resources are active/clean */
+static int resources_state_set(struct hinic_hwdev *hwdev,
+			       enum hinic_res_state state)
+{
+	struct hinic_cmd_set_res_state cmd = {0};
+	u16 out_size = sizeof(cmd);
+	int err;
+
+	err = hinic_global_func_id_get(hwdev, &cmd.func_idx);
+	if (err)
+		return err;
+
+	cmd.state = state;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_RES_STATE_SET,
+				     &cmd, sizeof(cmd),
+				     &cmd, &out_size, 0);
+	if (err || !out_size || cmd.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set resources state, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, cmd.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Dispatch a management-cpu message to the handler registered for its cmd.
+ * Unmatched commands are logged and their payload dumped as 32-bit words;
+ * out_size is zeroed so no stale response is returned.
+ */
+static void comm_mgmt_msg_handler(void *hwdev, void *pri_handle, u8 cmd,
+				  void *buf_in, u16 in_size, void *buf_out,
+				  u16 *out_size)
+{
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt = pri_handle;
+	u8 cmd_idx;
+	u32 *mem;
+	u16 i;
+
+	/* Linear scan of the registered handler table for this cmd */
+	for (cmd_idx = 0; cmd_idx < pf_to_mgmt->proc.cmd_num; cmd_idx++) {
+		if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) {
+			if (!pf_to_mgmt->proc.info[cmd_idx].proc) {
+				sdk_warn(pf_to_mgmt->hwdev->dev_hdl,
+					 "PF recv up comm msg handle null, cmd(0x%x)\n",
+					 cmd);
+			} else {
+				pf_to_mgmt->proc.info[cmd_idx].proc(hwdev,
+					buf_in, in_size, buf_out, out_size);
+			}
+
+			return;
+		}
+	}
+
+	/* No handler registered: log the event and dump the raw payload */
+	sdk_warn(pf_to_mgmt->hwdev->dev_hdl, "Received mgmt cpu event: 0x%x\n",
+		 cmd);
+
+	mem = buf_in;
+	for (i = 0; i < (in_size / sizeof(u32)); i++) {
+		pr_info("0x%x\n", *mem);
+		mem++;
+	}
+
+	*out_size = 0;
+}
+
+/* Answer a VF query with the PPF state cached on this card's chip node */
+static int hinic_vf_get_ppf_init_state(void *handle, void *buf_out,
+				       u16 *out_size)
+{
+	struct hinic_hwdev *dev = handle;
+	struct hinic_ppf_state *state = buf_out;
+
+	state->ppf_state = (u8)dev->chip_node->ppf_state;
+	*out_size = sizeof(*state);
+
+	return 0;
+}
+
+/**
+ * hinic_get_sdi_mode - query the current SDI mode from the management cpu
+ * @hwdev: the pointer to hw device
+ * @cur_mode: filled with the current sdi mode on success
+ * Return: negative on channel failure; otherwise the firmware status
+ * (which may be HINIC_MGMT_CMD_UNSUPPORTED - callers must tolerate it)
+ */
+int hinic_get_sdi_mode(struct hinic_hwdev *hwdev, u16 *cur_mode)
+{
+	struct hinic_sdi_mode_info sdi_mode = {0};
+	u16 out_size = sizeof(sdi_mode);
+	int err;
+
+	/* Clear the SET flag: this is a query-only request */
+	sdi_mode.opcode = HINIC_SDI_INFO_MODE & (~HINIC_SDI_INFO_SET);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_GET_SDI_MODE, &sdi_mode,
+				     sizeof(sdi_mode), &sdi_mode, &out_size, 0);
+	if ((sdi_mode.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+	     sdi_mode.status) || err || !out_size) {
+		/* hwdev is already typed; the old (struct hinic_hwdev *)
+		 * cast here was redundant and has been dropped
+		 */
+		sdk_err(hwdev->dev_hdl,
+			"Failed to get sdi mode info, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, sdi_mode.status, out_size);
+		return -EFAULT;
+	}
+
+	*cur_mode = sdi_mode.cur_sdi_mode;
+
+	return sdi_mode.status;
+}
+
+/*
+ * PF-side mailbox handler for COMM-module messages coming from a VF.
+ * Validates the cmd against the hw_cmd_support_vf whitelist, services
+ * FLR and PPF-state queries locally, and forwards everything else to
+ * the management cpu on the VF's behalf.
+ */
+int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+			 u16 in_size, void *buf_out, u16 *out_size)
+{
+	int err = 0;
+	u8 size = ARRAY_SIZE(hw_cmd_support_vf);
+
+	if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd,
+					buf_in, in_size, size)) {
+		sdk_err(((struct hinic_hwdev *)handle)->dev_hdl,
+			"PF Receive VF(%d) common cmd(0x%x) or mbox len(0x%x) is invalid\n",
+			vf_id + hinic_glb_pf_vf_offset(handle), cmd, in_size);
+		err = HINIC_MBOX_VF_CMD_ERROR;
+		return err;
+	}
+
+	if (cmd == HINIC_MGMT_CMD_START_FLR) {
+		*out_size = 0;
+		err = hinic_vf_rx_tx_flush_in_pf(handle, vf_id);
+	} else if (cmd == HINIC_MGMT_CMD_GET_PPF_STATE) {
+		err = hinic_vf_get_ppf_init_state(handle, buf_out, out_size);
+	} else {
+		/* Proxy the VF request to the management cpu */
+		err = hinic_pf_msg_to_mgmt_sync(handle, HINIC_MOD_COMM, cmd,
+						buf_in, in_size, buf_out,
+						out_size, 0U);
+		/* busy-active-fw returns are expected and not worth logging */
+		if (err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+		    err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+			sdk_err(((struct hinic_hwdev *)handle)->dev_hdl,
+				"PF mbox common cmd %d callback handler err: %d\n",
+				cmd, err);
+	}
+
+	return err;
+}
+
+/*
+ * Allocate interface-service irqs for the async event queues and bring
+ * the aeqs up.  The requested queue count is clamped to HINIC_MAX_AEQS
+ * and further reduced if fewer irqs are granted than asked for.  On
+ * aeq init failure every allocated irq is returned to the pool.
+ */
+static int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev)
+{
+	struct irq_info aeq_irqs[HINIC_MAX_AEQS] = {{0} };
+	u16 num_aeqs, resp_num_irq = 0, i;
+	int err;
+
+	num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
+	if (num_aeqs > HINIC_MAX_AEQS) {
+		sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n",
+			 HINIC_MAX_AEQS);
+		num_aeqs = HINIC_MAX_AEQS;
+	}
+	err = hinic_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs,
+			       &resp_num_irq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %d\n",
+			num_aeqs);
+		return err;
+	}
+
+	/* The pool may grant fewer irqs; run with what we were given */
+	if (resp_num_irq < num_aeqs) {
+		sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n",
+			 resp_num_irq);
+		num_aeqs = resp_num_irq;
+	}
+
+	err = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n");
+		goto aeqs_init_err;
+	}
+
+	set_bit(HINIC_HWDEV_AEQ_INITED, &hwdev->func_state);
+
+	return 0;
+
+aeqs_init_err:
+	for (i = 0; i < num_aeqs; i++)
+		hinic_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id);
+
+	return err;
+}
+
+/* Tear down the aeqs and hand their irqs back to the interface pool */
+static void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev)
+{
+	struct irq_info irqs[HINIC_MAX_AEQS] = {{0} };
+	u16 irq_num, idx;
+
+	clear_bit(HINIC_HWDEV_AEQ_INITED, &hwdev->func_state);
+
+	/* Snapshot the irqs in use before the queues are destroyed */
+	hinic_get_aeq_irqs(hwdev, irqs, &irq_num);
+	hinic_aeqs_free(hwdev);
+	for (idx = 0; idx < irq_num; idx++)
+		hinic_free_irq(hwdev, SERVICE_T_INTF, irqs[idx].irq_id);
+}
+
+/*
+ * Allocate interface-service irqs for the completion event queues and
+ * bring the ceqs up.  Mirrors hinic_comm_aeqs_init: the queue count is
+ * clamped to HINIC_MAX_CEQS and to the number of irqs actually granted,
+ * and all irqs are released if ceq init fails.
+ */
+static int hinic_comm_ceqs_init(struct hinic_hwdev *hwdev)
+{
+	struct irq_info ceq_irqs[HINIC_MAX_CEQS] = {{0} };
+	u16 num_ceqs, resp_num_irq = 0, i;
+	int err;
+
+	num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif);
+	if (num_ceqs > HINIC_MAX_CEQS) {
+		sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n",
+			 HINIC_MAX_CEQS);
+		num_ceqs = HINIC_MAX_CEQS;
+	}
+
+	err = hinic_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs,
+			       &resp_num_irq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %d\n",
+			num_ceqs);
+		return err;
+	}
+
+	/* Run with however many irqs the pool actually granted */
+	if (resp_num_irq < num_ceqs) {
+		sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n",
+			 resp_num_irq);
+		num_ceqs = resp_num_irq;
+	}
+
+	err = hinic_ceqs_init(hwdev, num_ceqs, ceq_irqs);
+	if (err) {
+		sdk_err(hwdev->dev_hdl,
+			"Failed to init ceqs, err:%d\n", err);
+		goto ceqs_init_err;
+	}
+
+	return 0;
+
+ceqs_init_err:
+	for (i = 0; i < num_ceqs; i++)
+		hinic_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id);
+
+	return err;
+}
+
+/*
+ * Tear down the ceqs and return their irqs to the interface pool.
+ * The loop index is u16 to match num_irqs and the aeq counterpart
+ * (the old `int i` mixed signedness with the u16 bound).
+ */
+static void hinic_comm_ceqs_free(struct hinic_hwdev *hwdev)
+{
+	struct irq_info ceq_irqs[HINIC_MAX_CEQS] = {{0} };
+	u16 num_irqs;
+	u16 i;
+
+	hinic_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs);
+	hinic_ceqs_free(hwdev);
+	for (i = 0; i < num_irqs; i++)
+		hinic_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id);
+}
+
+/*
+ * Initialize the PF<->VF mailbox channel and hook its aeq events.
+ * PF additionally registers COMM and SW_FUNC mailbox handlers for its
+ * VFs; a VF only registers the COMM handler towards the PF.
+ */
+static int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	err = hinic_func_to_func_init(hwdev);
+	if (err)
+		return err;
+
+	/* Mailbox traffic arrives via these two aeq event types */
+	hinic_aeq_register_hw_cb(hwdev, HINIC_MBX_FROM_FUNC,
+				 hinic_mbox_func_aeqe_handler);
+	hinic_aeq_register_hw_cb(hwdev, HINIC_MBX_SEND_RSLT,
+				 hinic_mbox_self_aeqe_handler);
+
+	if (!HINIC_IS_VF(hwdev)) {
+		hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
+					  comm_pf_mbox_handler);
+		hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC,
+					  sw_func_pf_mbox_handler);
+	} else {
+		hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
+					  vf_to_pf_handler);
+	}
+
+	set_bit(HINIC_HWDEV_MBOX_INITED, &hwdev->func_state);
+
+	return 0;
+}
+
+/*
+ * Undo hinic_comm_func_to_func_init: detach the mailbox aeq handlers
+ * and mailbox callbacks, then free the channel itself.
+ * NOTE(review): SW_FUNC is unregistered even though a VF never
+ * registered it - presumably unregistering an absent callback is a
+ * no-op; confirm against hinic_unregister_pf_mbox_cb.
+ */
+static void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev)
+{
+	hinic_aeq_unregister_hw_cb(hwdev, HINIC_MBX_FROM_FUNC);
+	hinic_aeq_unregister_hw_cb(hwdev, HINIC_MBX_SEND_RSLT);
+
+	hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
+	hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC);
+
+	hinic_func_to_func_free(hwdev);
+}
+
+/*
+ * Initialize the PF-to-management-cpu message channel and register the
+ * COMM message dispatcher.  Skipped entirely for VFs and for functions
+ * without management capability.
+ */
+static int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	if (hinic_func_type(hwdev) == TYPE_VF ||
+	    !FUNC_SUPPORT_MGMT(hwdev))
+		return 0; /* VF do not support send msg to mgmt directly */
+
+	err = hinic_pf_to_mgmt_init(hwdev);
+	if (err)
+		return err;
+
+	/* Responses/events from the mgmt cpu arrive through this aeq type */
+	hinic_aeq_register_hw_cb(hwdev, HINIC_MSG_FROM_MGMT_CPU,
+				 hinic_mgmt_msg_aeqe_handler);
+
+	hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_COMM,
+				   hwdev->pf_to_mgmt, comm_mgmt_msg_handler);
+
+	set_bit(HINIC_HWDEV_MGMT_INITED, &hwdev->func_state);
+
+	return 0;
+}
+
+/*
+ * Undo hinic_comm_pf_to_mgmt_init in reverse order: message callback,
+ * aeq handler, then the channel itself.  Same VF/no-mgmt guard as init.
+ */
+static void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+	if (hinic_func_type(hwdev) == TYPE_VF ||
+	    !FUNC_SUPPORT_MGMT(hwdev))
+		return; /* VF do not support send msg to mgmt directly */
+
+	hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_COMM);
+
+	hinic_aeq_unregister_hw_cb(hwdev, HINIC_MSG_FROM_MGMT_CPU);
+
+	hinic_pf_to_mgmt_free(hwdev);
+}
+
+/*
+ * Initialize the CLP (command-line-protocol) channel to the mgmt cpu
+ * and record it in func_state.  VFs and functions without management
+ * capability do not use CLP.
+ */
+static int hinic_comm_clp_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	if (hinic_func_type(hwdev) == TYPE_VF ||
+	    !FUNC_SUPPORT_MGMT(hwdev))
+		return 0;
+
+	err = hinic_clp_pf_to_mgmt_init(hwdev);
+	if (err)
+		return err;
+
+	set_bit(HINIC_HWDEV_CLP_INITED, &hwdev->func_state);
+
+	return 0;
+}
+
+/* Clear the CLP state bit and free the CLP channel (PF/PPF only) */
+static void hinic_comm_clp_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+	if (hinic_func_type(hwdev) == TYPE_VF ||
+	    !FUNC_SUPPORT_MGMT(hwdev))
+		return;
+
+	clear_bit(HINIC_HWDEV_CLP_INITED, &hwdev->func_state);
+	hinic_clp_pf_to_mgmt_free(hwdev);
+}
+
+/*
+ * Bring up the command queues, attach their ceq completion handler and
+ * program the cmdq depth in hardware.  If setting the depth fails the
+ * queues are torn down again.
+ */
+static int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	err = hinic_cmdqs_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n");
+		return err;
+	}
+
+	/* cmdq completions are delivered through the HINIC_CMDQ ceq event */
+	hinic_ceq_register_cb(hwdev, HINIC_CMDQ, hinic_cmdq_ceq_handler);
+
+	err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n");
+		goto set_cmdq_depth_err;
+	}
+
+	return 0;
+
+set_cmdq_depth_err:
+	hinic_cmdqs_free(hwdev);
+
+	return err;
+}
+
+/* Detach the cmdq ceq handler, then free the command queues */
+static void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev)
+{
+	hinic_ceq_unregister_cb(hwdev, HINIC_CMDQ);
+	hinic_cmdqs_free(hwdev);
+}
+
+/*
+ * Mark the PF active and report the resources-active state to the
+ * management cpu; the PF status is rolled back if the report fails.
+ */
+static int hinic_sync_mgmt_func_state(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
+
+	err = resources_state_set(hwdev, HINIC_RES_ACTIVE);
+	if (err) {
+		sdk_err(hwdev->dev_hdl,
+			"Failed to set function resources state\n");
+		hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+		return err;
+	}
+
+	return 0;
+}
+
+/* Revert the PF status to INIT and report resources-clean to mgmt cpu */
+static void hinic_unsync_mgmt_func_state(struct hinic_hwdev *hwdev)
+{
+	hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+
+	resources_state_set(hwdev, HINIC_RES_CLEAN);
+}
+
+/*
+ * Reset the L2 NIC resources of this function.  The vport is disabled
+ * first and 100 ms is allowed for in-flight traffic to drain before the
+ * reset command is issued to the management cpu.
+ * @reset_flag: passed through to firmware to select what is reset.
+ * Return: 0 on success, negative errno on failure.
+ */
+int hinic_l2nic_reset_base(struct hinic_hwdev *hwdev, u16 reset_flag)
+{
+	struct hinic_l2nic_reset l2nic_reset = {0};
+	u16 out_size = sizeof(l2nic_reset);
+	int err = 0;
+
+	err = hinic_set_vport_enable(hwdev, false);
+	if (err)
+		return err;
+
+	/* give the datapath time to quiesce before resetting */
+	msleep(100);
+
+	sdk_info(hwdev->dev_hdl, "L2nic reset flag 0x%x\n", reset_flag);
+
+	err = hinic_global_func_id_get(hwdev, &l2nic_reset.func_id);
+	if (err)
+		return err;
+
+	l2nic_reset.reset_flag = reset_flag;
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_L2NIC_RESET, &l2nic_reset,
+				     sizeof(l2nic_reset), &l2nic_reset,
+				     &out_size, 0);
+	if (err || !out_size || l2nic_reset.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, l2nic_reset.status, out_size);
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_l2nic_reset_base);
+
+/* Default L2 NIC reset: no extra reset flags */
+static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
+{
+	return hinic_l2nic_reset_base(hwdev, 0);
+}
+
+/*
+ * Fetch board info and the mgmt cpu firmware version into hwdev.
+ * A board-info failure is tolerated for VFs on the master host (early
+ * firmware could not serve it to VFs); the struct is then filled with
+ * 0xff as an "unknown" marker.  Slave-host functions must succeed.
+ */
+static int __get_func_misc_info(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	err = hinic_get_board_info(hwdev, &hwdev->board_info);
+	if (err) {
+		/* For the pf/vf of slave host, return error */
+		if (hinic_pcie_itf_id(hwdev))
+			return err;
+
+		/* VF can't get board info in early version */
+		if (!HINIC_IS_VF(hwdev)) {
+			sdk_err(hwdev->dev_hdl, "Get board info failed\n");
+			return err;
+		}
+
+		memset(&hwdev->board_info, 0xff,
+		       sizeof(struct hinic_board_info));
+	}
+
+	err = hinic_get_mgmt_version(hwdev, hwdev->mgmt_ver);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Get mgmt cpu version failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * initialize communication channel
+ *
+ * Brings up, in order: CLP, aeqs, PF-to-mgmt channel, mailbox, aeq msix
+ * attributes, misc function info, host mode, L2NIC reset, multi-host
+ * mgmt, DMA attribute table, ceqs, ceq msix attributes, wq page size,
+ * command queues, mgmt function state and the stateless-event handler.
+ *
+ * NOTE(review): the error unwinding is deliberately asymmetric - the
+ * labels from func_to_func_init_err through sync_mgmt_func_err return
+ * without freeing earlier stages.  Presumably the caller is expected to
+ * invoke hinic_uninit_comm_ch(), which unwinds via the func_state bits
+ * set along the way - confirm against the caller before restructuring.
+ */
+int hinic_init_comm_ch(struct hinic_hwdev *hwdev)
+{
+	int err;
+	u16 func_id;
+
+	/* A slave host cannot proceed until the master host mailbox is up */
+	if (IS_BMGW_SLAVE_HOST(hwdev) &&
+	    (!hinic_get_master_host_mbox_enable(hwdev))) {
+		sdk_err(hwdev->dev_hdl, "Master host not initialized\n");
+		return -EFAULT;
+	}
+
+	err = hinic_comm_clp_to_mgmt_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init clp\n");
+		return err;
+	}
+
+	err = hinic_comm_aeqs_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n");
+		goto aeqs_init_err;
+	}
+
+	err = hinic_comm_pf_to_mgmt_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init msg\n");
+		goto msg_init_err;
+	}
+
+	err = hinic_comm_func_to_func_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init mailbox\n");
+		goto func_to_func_init_err;
+	}
+
+	err = init_aeqs_msix_attr(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n");
+		goto aeqs_msix_attr_init_err;
+	}
+
+	err = __get_func_misc_info(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to get function misc information\n");
+		goto get_func_info_err;
+	}
+
+	/* detect master host chip mode according board type and host id */
+	err = rectify_host_mode(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to rectify host mode\n");
+		goto rectify_mode_err;
+	}
+
+	err = hinic_l2nic_reset(hwdev);
+	if (err)
+		goto l2nic_reset_err;
+
+	if (IS_MULTI_HOST(hwdev)) {
+		err = hinic_multi_host_mgmt_init(hwdev);
+		if (err)
+			goto multi_host_mgmt_init_err;
+	}
+
+	/* NOTE(review): return value deliberately(?) ignored - a VF DMA
+	 * attr failure does not abort channel init; confirm intended
+	 */
+	dma_attr_table_init(hwdev);
+
+	err = hinic_comm_ceqs_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n");
+		goto ceqs_init_err;
+	}
+
+	err = init_ceqs_msix_attr(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n");
+		goto init_eqs_msix_err;
+	}
+
+	/* set default wq page_size */
+	hwdev->wq_page_size = HINIC_DEFAULT_WQ_PAGE_SIZE;
+
+	err = hinic_global_func_id_get(hwdev, &func_id);
+	if (err)
+		goto get_func_id_err;
+
+	err = hinic_set_wq_page_size(hwdev, func_id, hwdev->wq_page_size);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n");
+		goto init_wq_pg_size_err;
+	}
+
+	err = hinic_comm_cmdqs_init(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n");
+		goto cmdq_init_err;
+	}
+
+	set_bit(HINIC_HWDEV_CMDQ_INITED, &hwdev->func_state);
+
+	err = hinic_sync_mgmt_func_state(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to synchronize mgmt function state\n");
+		goto sync_mgmt_func_err;
+	}
+
+	err = hinic_aeq_register_swe_cb(hwdev, HINIC_STATELESS_EVENT,
+					hinic_nic_sw_aeqe_handler);
+	if (err) {
+		sdk_err(hwdev->dev_hdl,
+			"Failed to register ucode aeqe handler\n");
+		goto register_ucode_aeqe_err;
+	}
+
+	set_bit(HINIC_HWDEV_COMM_CH_INITED, &hwdev->func_state);
+
+	return 0;
+
+register_ucode_aeqe_err:
+	hinic_unsync_mgmt_func_state(hwdev);
+sync_mgmt_func_err:
+	return err;
+
+cmdq_init_err:
+	if (HINIC_FUNC_TYPE(hwdev) != TYPE_VF)
+		hinic_set_wq_page_size(hwdev, func_id, HINIC_HW_WQ_PAGE_SIZE);
+init_wq_pg_size_err:
+get_func_id_err:
+init_eqs_msix_err:
+	hinic_comm_ceqs_free(hwdev);
+
+ceqs_init_err:
+	if (IS_MULTI_HOST(hwdev))
+		hinic_multi_host_mgmt_free(hwdev);
+multi_host_mgmt_init_err:
+l2nic_reset_err:
+rectify_mode_err:
+get_func_info_err:
+aeqs_msix_attr_init_err:
+func_to_func_init_err:
+	return err;
+
+msg_init_err:
+	hinic_comm_aeqs_free(hwdev);
+
+aeqs_init_err:
+	hinic_comm_clp_to_mgmt_free(hwdev);
+
+	return err;
+}
+
+/*
+ * Tear down exactly one communication-channel stage identified by its
+ * init-state bit.  Called by hinic_uninit_comm_ch() while it walks the
+ * states from highest to lowest; each case is independent (no
+ * fallthrough).
+ */
+static void __uninit_comm_module(struct hinic_hwdev *hwdev,
+				 enum hinic_hwdev_init_state init_state)
+{
+	u16 func_id;
+
+	switch (init_state) {
+	case HINIC_HWDEV_COMM_CH_INITED:
+		hinic_aeq_unregister_swe_cb(hwdev,
+					    HINIC_STATELESS_EVENT);
+		hinic_unsync_mgmt_func_state(hwdev);
+		break;
+	case HINIC_HWDEV_CMDQ_INITED:
+		hinic_comm_cmdqs_free(hwdev);
+		/* VF can set page size of 256K only, any other value
+		 * will return error in pf, pf will set all vf's page
+		 * size to 4K when disable sriov
+		 */
+		if (HINIC_FUNC_TYPE(hwdev) != TYPE_VF) {
+			func_id = hinic_global_func_id_hw(hwdev);
+			hinic_set_wq_page_size(hwdev, func_id,
+					       HINIC_HW_WQ_PAGE_SIZE);
+		}
+
+		hinic_comm_ceqs_free(hwdev);
+
+		if (IS_MULTI_HOST(hwdev))
+			hinic_multi_host_mgmt_free(hwdev);
+		break;
+	case HINIC_HWDEV_MBOX_INITED:
+		hinic_comm_func_to_func_free(hwdev);
+		break;
+	case HINIC_HWDEV_MGMT_INITED:
+		hinic_comm_pf_to_mgmt_free(hwdev);
+		break;
+	case HINIC_HWDEV_AEQ_INITED:
+		hinic_comm_aeqs_free(hwdev);
+		break;
+	case HINIC_HWDEV_CLP_INITED:
+		hinic_comm_clp_to_mgmt_free(hwdev);
+		break;
+	default:
+		break;
+	}
+}
+
+#define HINIC_FUNC_STATE_BUSY_TIMEOUT	300
+/*
+ * Tear down the communication channel by walking the init states from
+ * highest to lowest and freeing each stage whose bit is set.  Before
+ * freeing a stage, wait (about 300 x 1 ms) for any in-flight user of
+ * the channel to clear HINIC_HWDEV_STATE_BUSY.
+ */
+void hinic_uninit_comm_ch(struct hinic_hwdev *hwdev)
+{
+	enum hinic_hwdev_init_state init_state = HINIC_HWDEV_COMM_CH_INITED;
+	int cnt;
+
+	while (init_state > HINIC_HWDEV_NONE_INITED) {
+		if (!test_bit(init_state, &hwdev->func_state)) {
+			init_state--;
+			continue;
+		}
+		/* Clear first so new users see the stage as gone */
+		clear_bit(init_state, &hwdev->func_state);
+
+		cnt = 0;
+		while (test_bit(HINIC_HWDEV_STATE_BUSY, &hwdev->func_state) &&
+		       cnt++ <= HINIC_FUNC_STATE_BUSY_TIMEOUT)
+			usleep_range(900, 1000);
+
+		__uninit_comm_module(hwdev, init_state);
+
+		init_state--;
+	}
+}
+
+/*
+ * Allocate the shared work-queue set used by the side-logic queues.
+ * @num_wqs: number of work queues to provision.
+ * Return: 0 on success, -EINVAL/-ENOMEM or the wqs-alloc error.
+ * On failure hwdev->wqs is freed and reset to NULL.
+ */
+int hinic_slq_init(void *dev, int num_wqs)
+{
+	struct hinic_hwdev *hwdev = dev;
+	int err;
+
+	if (!dev)
+		return -EINVAL;
+
+	hwdev->wqs = kzalloc(sizeof(*hwdev->wqs), GFP_KERNEL);
+	if (!hwdev->wqs)
+		return -ENOMEM;
+
+	err = hinic_wqs_alloc(hwdev->wqs, num_wqs, hwdev->dev_hdl);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to alloc wqs\n");
+		kfree(hwdev->wqs);
+		hwdev->wqs = NULL;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(hinic_slq_init);
+
+/*
+ * Release the shared work-queue set allocated by hinic_slq_init().
+ * hwdev->wqs is reset to NULL after freeing so a stale pointer cannot
+ * be double-freed by a repeated uninit (the old code left it dangling).
+ */
+void hinic_slq_uninit(void *dev)
+{
+	struct hinic_hwdev *hwdev = dev;
+
+	if (!hwdev)
+		return;
+
+	hinic_wqs_free(hwdev->wqs);
+
+	kfree(hwdev->wqs);
+	hwdev->wqs = NULL;
+}
+EXPORT_SYMBOL(hinic_slq_uninit);
+
+/*
+ * Allocate one side-logic work queue and report its CLA block address.
+ * On success *handle refers to the new wq and *cla_addr to its block
+ * physical address; free with hinic_slq_free().
+ */
+int hinic_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth, u16 page_size,
+		    u64 *cla_addr, void **handle)
+{
+	struct hinic_hwdev *hwdev = dev;
+	struct hinic_wq *new_wq;
+	int err;
+
+	if (!dev || !cla_addr || !handle)
+		return -EINVAL;
+
+	new_wq = kzalloc(sizeof(*new_wq), GFP_KERNEL);
+	if (!new_wq)
+		return -ENOMEM;
+
+	err = hinic_wq_allocate(hwdev->wqs, new_wq, wqebb_size,
+				hwdev->wq_page_size, q_depth, 0);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to alloc wq\n");
+		kfree(new_wq);
+		return -EFAULT;
+	}
+
+	*cla_addr = new_wq->block_paddr;
+	*handle = new_wq;
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_slq_alloc);
+
+/* Return a wq obtained from hinic_slq_alloc() and free its handle */
+void hinic_slq_free(void *dev, void *handle)
+{
+	struct hinic_hwdev *hwdev = dev;
+
+	if (!hwdev || !handle)
+		return;
+
+	hinic_wq_free(hwdev->wqs, handle);
+	kfree(handle);
+}
+EXPORT_SYMBOL(hinic_slq_free);
+
+/* Address of wqebb @index within the wq handle, or 0 for a NULL handle */
+u64 hinic_slq_get_addr(void *handle, u16 index)
+{
+	if (!handle)
+		return 0; /* NULL of wqe addr */
+
+	return (u64)hinic_get_wqebb_addr(handle, index);
+}
+EXPORT_SYMBOL(hinic_slq_get_addr);
+
+/* Address of the first wqe page of the wq handle, or 0 for NULL handle */
+u64 hinic_slq_get_first_pageaddr(void *handle)
+{
+	struct hinic_wq *wq = handle;
+
+	if (!handle)
+		return 0; /* NULL of wqe addr */
+
+	return hinic_get_first_wqe_page_addr(wq);
+}
+EXPORT_SYMBOL(hinic_slq_get_first_pageaddr);
+
+/*
+ * Enable or disable this function's entry in the PPF timer bitmap via
+ * the management cpu.  Return 0 on success, negative errno on failure.
+ */
+int hinic_func_tmr_bitmap_set(void *hwdev, bool en)
+{
+	struct hinic_func_tmr_bitmap_op bitmap_op = {0};
+	u16 out_size = sizeof(bitmap_op);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &bitmap_op.func_idx);
+	if (err)
+		return err;
+
+	bitmap_op.ppf_idx = hinic_ppf_idx(hwdev);
+	bitmap_op.op_id = en ? FUNC_TMR_BITMAP_ENABLE : FUNC_TMR_BITMAP_DISABLE;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET,
+				     &bitmap_op, sizeof(bitmap_op),
+				     &bitmap_op, &out_size, 0);
+	if (err || !out_size || bitmap_op.status) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, bitmap_op.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_func_tmr_bitmap_set);
+
+/*
+ * Allocate one pair of hash-table GPA pages and report their physical
+ * addresses to the management cpu.  The pages are only recorded in
+ * hwdev->page_pa0/page_pa1 on full success; on any failure the pages
+ * already allocated stay recorded in @pg0/@pg1 so the caller
+ * (hinic_ppf_ht_gpa_init) can free them.
+ */
+static int ppf_ht_gpa_set(struct hinic_hwdev *hwdev, struct hinic_page_addr *pg0,
+			  struct hinic_page_addr *pg1)
+{
+	struct comm_info_ht_gpa_set ht_gpa_set = {0};
+	u16 out_size = sizeof(ht_gpa_set);
+	int ret;
+
+	pg0->virt_addr = dma_alloc_coherent(hwdev->dev_hdl,
+					    HINIC_HT_GPA_PAGE_SIZE,
+					    &pg0->phys_addr, GFP_KERNEL);
+	if (!pg0->virt_addr) {
+		sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n");
+		return -EFAULT;
+	}
+
+	pg1->virt_addr = dma_alloc_coherent(hwdev->dev_hdl,
+					    HINIC_HT_GPA_PAGE_SIZE,
+					    &pg1->phys_addr, GFP_KERNEL);
+	if (!pg1->virt_addr) {
+		sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n");
+		return -EFAULT;
+	}
+
+	ht_gpa_set.page_pa0 = pg0->phys_addr;
+	ht_gpa_set.page_pa1 = pg1->phys_addr;
+	sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n",
+		 pg0->phys_addr, pg1->phys_addr);
+	ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_PPF_HT_GPA_SET,
+				     &ht_gpa_set, sizeof(ht_gpa_set),
+				     &ht_gpa_set, &out_size, 0);
+	if (ret || !out_size || ht_gpa_set.status) {
+		sdk_warn(hwdev->dev_hdl, "PPF ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n",
+			 ret, ht_gpa_set.status, out_size);
+		return -EFAULT;
+	}
+
+	/* Success: remember the accepted pages for later deinit */
+	hwdev->page_pa0.phys_addr = pg0->phys_addr;
+	hwdev->page_pa0.virt_addr = pg0->virt_addr;
+
+	hwdev->page_pa1.phys_addr = pg1->phys_addr;
+	hwdev->page_pa1.virt_addr = pg1->virt_addr;
+
+	return 0;
+}
+
+/*
+ * Try up to HINIC_PPF_HT_GPA_SET_RETRY_TIMES times to hand a pair of
+ * hash-table GPA pages to firmware.  Pages from failed attempts are
+ * kept in the two local arrays (indices 0..i-1) and freed once an
+ * attempt succeeds or all retries are exhausted.
+ * Return: 0 on success, -EFAULT when every retry failed.
+ */
+int hinic_ppf_ht_gpa_init(struct hinic_hwdev *hwdev)
+{
+	int ret;
+	int i;
+	int j;
+	int size;
+
+	struct hinic_page_addr page_addr0[HINIC_PPF_HT_GPA_SET_RETRY_TIMES];
+	struct hinic_page_addr page_addr1[HINIC_PPF_HT_GPA_SET_RETRY_TIMES];
+
+	size = HINIC_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]);
+	memset(page_addr0, 0, size);
+	memset(page_addr1, 0, size);
+
+	for (i = 0; i < HINIC_PPF_HT_GPA_SET_RETRY_TIMES; i++) {
+		ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]);
+		if (!ret)
+			break;
+	}
+
+	/* Free the pages left over from the attempts that failed */
+	for (j = 0; j < i; j++) {
+		if (page_addr0[j].virt_addr) {
+			dma_free_coherent(hwdev->dev_hdl,
+					  HINIC_HT_GPA_PAGE_SIZE,
+					  page_addr0[j].virt_addr,
+					  page_addr0[j].phys_addr);
+			page_addr0[j].virt_addr = NULL;
+		}
+		if (page_addr1[j].virt_addr) {
+			dma_free_coherent(hwdev->dev_hdl,
+					  HINIC_HT_GPA_PAGE_SIZE,
+					  page_addr1[j].virt_addr,
+					  page_addr1[j].phys_addr);
+			page_addr1[j].virt_addr = NULL;
+		}
+	}
+
+	if (i >= HINIC_PPF_HT_GPA_SET_RETRY_TIMES) {
+		sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n",
+			i);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Free both hash-table GPA pages recorded on hwdev, if allocated */
+void hinic_ppf_ht_gpa_deinit(struct hinic_hwdev *hwdev)
+{
+	struct hinic_page_addr *pages[] = {
+		&hwdev->page_pa0, &hwdev->page_pa1
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(pages); i++) {
+		if (!pages[i]->virt_addr)
+			continue;
+
+		dma_free_coherent(hwdev->dev_hdl, HINIC_HT_GPA_PAGE_SIZE,
+				  pages[i]->virt_addr,
+				  pages[i]->phys_addr);
+		pages[i]->virt_addr = NULL;
+	}
+}
+
+/*
+ * Start or stop the PPF timer via the management cpu.  Starting first
+ * provisions the hash-table GPA pages the timer needs; stopping frees
+ * them.  PPF-only.
+ * NOTE(review): on STOP the pages are freed before the stop command is
+ * acknowledged by firmware - confirm the device no longer accesses them
+ * at that point.
+ */
+static int set_ppf_tmr_status(struct hinic_hwdev *hwdev,
+			      enum ppf_tmr_status status)
+{
+	struct hinic_ppf_tmr_op op = {0};
+	u16 out_size = sizeof(op);
+	int err = 0;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (hinic_func_type(hwdev) != TYPE_PPF)
+		return -EFAULT;
+
+	if (status == HINIC_PPF_TMR_FLAG_START) {
+		err = hinic_ppf_ht_gpa_init(hwdev);
+		if (err) {
+			sdk_err(hwdev->dev_hdl, "PPF ht gpa init fail!\n");
+			return -EFAULT;
+		}
+	} else {
+		hinic_ppf_ht_gpa_deinit(hwdev);
+	}
+
+	op.op_id = status;
+	op.ppf_idx = hinic_ppf_idx(hwdev);
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_PPF_TMR_SET, &op,
+				     sizeof(op), &op, &out_size, 0);
+	if (err || !out_size || op.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, op.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Public wrapper: start the PPF timer (PPF only, NULL-checked) */
+int hinic_ppf_tmr_start(void *hwdev)
+{
+	if (!hwdev) {
+		pr_err("Hwdev pointer is NULL for starting ppf timer\n");
+		return -EINVAL;
+	}
+
+	return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_START);
+}
+EXPORT_SYMBOL(hinic_ppf_tmr_start);
+
+/* Public wrapper: stop the PPF timer (PPF only, NULL-checked) */
+int hinic_ppf_tmr_stop(void *hwdev)
+{
+	if (!hwdev) {
+		pr_err("Hwdev pointer is NULL for stop ppf timer\n");
+		return -EINVAL;
+	}
+
+	return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_STOP);
+}
+EXPORT_SYMBOL(hinic_ppf_tmr_stop);
+
+/*
+ * Try to allocate @page_num DMA-coherent search pages of @page_size
+ * bytes.  All-or-nothing: a partial allocation is rolled back and
+ * -EFAULT returned; on success the size/count are recorded in mqm_att.
+ */
+int mqm_eqm_try_alloc_mem(struct hinic_hwdev *hwdev, u32 page_size,
+			  u32 page_num)
+{
+	struct hinic_page_addr *page = hwdev->mqm_att.brm_srch_page_addr;
+	u32 done;
+	u32 idx;
+
+	for (done = 0; done < page_num; done++) {
+		page->virt_addr = dma_alloc_coherent(hwdev->dev_hdl, page_size,
+						     &page->phys_addr,
+						     GFP_KERNEL);
+		if (!page->virt_addr)
+			break;
+		page++;
+	}
+
+	if (done == page_num) {
+		hwdev->mqm_att.page_size = page_size;
+		hwdev->mqm_att.page_num = page_num;
+		return 0;
+	}
+
+	/* Partial allocation: give every page back and report failure */
+	page = hwdev->mqm_att.brm_srch_page_addr;
+	for (idx = 0; idx < done; idx++) {
+		dma_free_coherent(hwdev->dev_hdl, page_size,
+				  page->virt_addr, page->phys_addr);
+		page++;
+	}
+
+	return -EFAULT;
+}
+
+/*
+ * Allocate the MQM search pages, preferring fewer, larger pages:
+ * 64KB pages (chunk_num/16), then 8KB (chunk_num/2), then 4KB
+ * (one per chunk).  Returns the last attempt's error when all fail.
+ */
+int mqm_eqm_alloc_page_mem(struct hinic_hwdev *hwdev)
+{
+	if (!mqm_eqm_try_alloc_mem(hwdev, 64 * 1024,
+				   hwdev->mqm_att.chunk_num >> 4))
+		return 0;
+
+	if (!mqm_eqm_try_alloc_mem(hwdev, 8 * 1024,
+				   hwdev->mqm_att.chunk_num >> 1))
+		return 0;
+
+	return mqm_eqm_try_alloc_mem(hwdev, 4 * 1024,
+				     hwdev->mqm_att.chunk_num);
+}
+
+/* Free every MQM search page recorded by mqm_eqm_try_alloc_mem() */
+void mqm_eqm_free_page_mem(struct hinic_hwdev *hwdev)
+{
+	struct hinic_page_addr *page = hwdev->mqm_att.brm_srch_page_addr;
+	u32 size = hwdev->mqm_att.page_size;
+	u32 idx;
+
+	for (idx = 0; idx < hwdev->mqm_att.page_num; idx++, page++)
+		dma_free_coherent(hwdev->dev_hdl, size,
+				  page->virt_addr, page->phys_addr);
+}
+
+/*
+ * Push the MQM EQM configuration (ppf id, page size, valid flag) to
+ * the management cpu.  @valid: 1 enables the config, 0 disables it.
+ */
+int mqm_eqm_set_cfg_2_hw(struct hinic_hwdev *hwdev, u32 valid)
+{
+	struct comm_info_eqm_cfg cfg = {0};
+	u16 out_size = sizeof(cfg);
+	int err;
+
+	cfg.ppf_id = hinic_global_func_id_hw(hwdev);
+	cfg.page_size = hwdev->mqm_att.page_size;
+	cfg.valid = valid;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_MQM_CFG_INFO_SET,
+				     &cfg, sizeof(cfg),
+				     &cfg, &out_size, 0);
+	if (err || !out_size || cfg.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to init func table, err: %d, status: 0x%x, out_size: 0x%x\n",
+			err, cfg.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+#define EQM_DATA_BUF_SIZE		1024
+
+/*
+ * Report every MQM search page GPA to the management cpu.  Pages are
+ * sent in batches of 128 entries (phys_addr >> 12, i.e. 4K-aligned
+ * frame numbers); a final partial batch is flushed when the total page
+ * count is not a multiple of 128.
+ * Return: 0 on success, -EFAULT on any command failure.
+ */
+int mqm_eqm_set_page_2_hw(struct hinic_hwdev *hwdev)
+{
+	struct comm_info_eqm_search_gpa *info;
+	struct hinic_page_addr *page_addr;
+	void *send_buf;
+	u16 send_buf_size;
+	u32 i;
+	u64 *gpa_hi52;
+	u64 gpa;
+	u32 num;
+	u32 start_idx;
+	int err = 0;
+	u32 valid_page_num;
+	u16 out_size;
+
+	send_buf_size = sizeof(struct comm_info_eqm_search_gpa) +
+			EQM_DATA_BUF_SIZE;
+	send_buf = kzalloc(send_buf_size, GFP_KERNEL);
+	if (!send_buf) {
+		sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n");
+		return -EFAULT;
+	}
+
+	page_addr = hwdev->mqm_att.brm_srch_page_addr;
+	info = (struct comm_info_eqm_search_gpa *)send_buf;
+	valid_page_num = 0;
+
+	gpa_hi52 = info->gpa_hi52;
+	num = 0;
+	start_idx = 0;
+	for (i = 0; i < hwdev->mqm_att.page_num; i++) {
+		/* 4K page frame number of this search page */
+		gpa = page_addr->phys_addr >> 12;
+		gpa_hi52[num] = gpa;
+		num++;
+		/* Flush a full batch of 128 entries */
+		if (num == 128) {
+			info->num = num;
+			info->start_idx = start_idx;
+			out_size = send_buf_size;
+			err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+						     HINIC_MGMT_CMD_MQM_SRCH_GPA_SET,
+						     info, (u16)send_buf_size, info,
+						     &out_size, 0);
+			if (err || !out_size || info->status) {
+				sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n",
+					err, info->status, out_size);
+				err = -EFAULT;
+				goto set_page_2_hw_end;
+			}
+
+			gpa_hi52 = info->gpa_hi52;
+			num = 0;
+			start_idx = i + 1;
+		}
+		page_addr++;
+		valid_page_num++;
+	}
+
+	/* Flush the trailing partial batch, if any */
+	if (0 != (valid_page_num & 0x7f)) {
+		info->num = num;
+		info->start_idx = start_idx;
+		out_size = send_buf_size;
+		err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+					     HINIC_MGMT_CMD_MQM_SRCH_GPA_SET,
+					     info, (u16)send_buf_size,
+					     info, &out_size, 0);
+		if (err || !out_size || info->status) {
+			sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n",
+				err, info->status, out_size);
+			err = -EFAULT;
+			goto set_page_2_hw_end;
+		}
+	}
+
+set_page_2_hw_end:
+	kfree(send_buf);
+	return err;
+}
+
+/*
+ * Initialize the MQM external double-buffer memory (PPF only): query
+ * the chunk requirements from firmware, allocate search pages, program
+ * their GPAs and enable the configuration.  A zero chunk_num from
+ * firmware means the feature is unused and init succeeds trivially.
+ */
+int mqm_eqm_init(struct hinic_hwdev *hwdev)
+{
+	struct comm_info_eqm_fix info_eqm_fix = {0};
+	u16 len = sizeof(info_eqm_fix);
+	int ret;
+
+	if (hwdev->hwif->attr.func_type != TYPE_PPF)
+		return 0;
+
+	ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_MQM_FIX_INFO_GET,
+				     &info_eqm_fix, sizeof(info_eqm_fix),
+				     &info_eqm_fix, &len, 0);
+	if (ret || !len || info_eqm_fix.status) {
+		sdk_err(hwdev->dev_hdl, "Get mqm fix info failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+			ret, info_eqm_fix.status, len);
+		return -EFAULT;
+	}
+	if (!(info_eqm_fix.chunk_num))
+		return 0;
+
+	hwdev->mqm_att.chunk_num = info_eqm_fix.chunk_num;
+	hwdev->mqm_att.search_gpa_num = info_eqm_fix.search_gpa_num;
+	hwdev->mqm_att.page_size = 0;
+	hwdev->mqm_att.page_num = 0;
+
+	/* One page-address slot per chunk (larger pages use fewer slots) */
+	hwdev->mqm_att.brm_srch_page_addr =
+		kcalloc(hwdev->mqm_att.chunk_num,
+			sizeof(struct hinic_page_addr), GFP_KERNEL);
+	if (!(hwdev->mqm_att.brm_srch_page_addr)) {
+		sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\n");
+		return -EFAULT;
+	}
+
+	ret = mqm_eqm_alloc_page_mem(hwdev);
+	if (ret) {
+		sdk_err(hwdev->dev_hdl, "Alloc eqm page mem failed\n");
+		goto err_page;
+	}
+
+	ret = mqm_eqm_set_page_2_hw(hwdev);
+	if (ret) {
+		sdk_err(hwdev->dev_hdl, "Set page to hw failed\n");
+		goto err_ecmd;
+	}
+
+	ret = mqm_eqm_set_cfg_2_hw(hwdev, 1);
+	if (ret) {
+		sdk_err(hwdev->dev_hdl, "Set page to hw failed\n");
+		goto err_ecmd;
+	}
+
+	return 0;
+
+err_ecmd:
+	mqm_eqm_free_page_mem(hwdev);
+
+err_page:
+	kfree(hwdev->mqm_att.brm_srch_page_addr);
+
+	return ret;
+}
+
+void mqm_eqm_deinit(struct hinic_hwdev *hwdev)
+{
+ int ret;
+
+ if (hwdev->hwif->attr.func_type != TYPE_PPF)
+ return;
+
+ if (!(hwdev->mqm_att.chunk_num))
+ return;
+
+ mqm_eqm_free_page_mem(hwdev);
+ kfree(hwdev->mqm_att.brm_srch_page_addr);
+
+ ret = mqm_eqm_set_cfg_2_hw(hwdev, 0);
+ if (ret) {
+ sdk_err(hwdev->dev_hdl, "Set mqm eqm cfg to chip fail, err: %d\n",
+ ret);
+ return;
+ }
+
+ hwdev->mqm_att.chunk_num = 0;
+ hwdev->mqm_att.search_gpa_num = 0;
+ hwdev->mqm_att.page_num = 0;
+ hwdev->mqm_att.page_size = 0;
+}
+
+int hinic_ppf_ext_db_init(void *dev)
+{
+ struct hinic_hwdev *hwdev = dev;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ret = mqm_eqm_init(hwdev);
+ if (ret) {
+ sdk_err(hwdev->dev_hdl, "MQM eqm init failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_ppf_ext_db_init);
+
+int hinic_ppf_ext_db_deinit(void *dev)
+{
+ struct hinic_hwdev *hwdev = dev;
+
+ if (!dev)
+ return -EINVAL;
+
+ if (hwdev->hwif->attr.func_type != TYPE_PPF)
+ return -EFAULT;
+
+ mqm_eqm_deinit(hwdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_ppf_ext_db_deinit);
+
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct hinic_wq_page_size page_size_info = {0};
+ u16 out_size = sizeof(page_size_info);
+ int err;
+
+ page_size_info.func_idx = func_idx;
+ page_size_info.ppf_idx = hinic_ppf_idx(hwdev);
+ page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_PAGESIZE_SET,
+ &page_size_info, sizeof(page_size_info),
+ &page_size_info, &out_size, 0);
+ if (err || !out_size || page_size_info.status) {
+ sdk_err(hwdev->dev_hdl, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n",
+ err, page_size_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
/* Internal event identifiers used by _event_handler(); numeric gaps group
 * the events by their origin (hilink, driver, mgmt).
 */
enum hinic_event_cmd {
	/* hilink event */
	HINIC_EVENT_LINK_STATUS_CHANGE = 1,
	HINIC_EVENT_LINK_ERR,
	HINIC_EVENT_CABLE_PLUG,
	HINIC_EVENT_HILINK_INFO,
	/* reserved for hilink */

	/* driver event, pf & vf communicate */
	HINIC_EVENT_HEARTBEAT_LOST = 31,
	HINIC_EVENT_SET_VF_COS,

	/* mgmt event */
	HINIC_EVENT_MGMT_FAULT = 61,
	HINIC_EVENT_MGMT_WATCHDOG,
	HINIC_EVENT_MGMT_FMW_ACT_NTC,
	HINIC_EVENT_MGMT_RESET,
	HINIC_EVENT_MGMT_PCIE_DFX,
	HINIC_EVENT_MCTP_HOST_INFO,
	HINIC_EVENT_SFP_INFO_REPORT,
	HINIC_EVENT_SFP_ABS_REPORT,

	/* sentinel: also used as "no match" by __get_event_type() */
	HINIC_EVENT_MAX_TYPE,
};
+
/* One (module, command) -> internal event mapping entry for __event_convert */
struct hinic_event_convert {
	u8 mod;		/* HINIC_MOD_* the mgmt message arrived on */
	u8 cmd;		/* module-specific command id */

	enum hinic_event_cmd event;	/* internal event it translates to */
};
+
/* Lookup table translating raw mgmt (module, command) pairs into the
 * driver-internal hinic_event_cmd values; scanned linearly by
 * __get_event_type().
 */
static struct hinic_event_convert __event_convert[] = {
	/* hilink event */
	{
		.mod = HINIC_MOD_L2NIC,
		.cmd = HINIC_PORT_CMD_LINK_STATUS_REPORT,
		.event = HINIC_EVENT_LINK_STATUS_CHANGE,
	},
	{
		.mod = HINIC_MOD_L2NIC,
		.cmd = HINIC_PORT_CMD_LINK_ERR_EVENT,
		.event = HINIC_EVENT_LINK_ERR,
	},
	{
		.mod = HINIC_MOD_L2NIC,
		.cmd = HINIC_PORT_CMD_CABLE_PLUG_EVENT,
		.event = HINIC_EVENT_CABLE_PLUG,
	},
	{
		.mod = HINIC_MOD_HILINK,
		.cmd = HINIC_HILINK_CMD_GET_LINK_INFO,
		.event = HINIC_EVENT_HILINK_INFO,
	},

	/* driver triggered event */
	{
		.mod = HINIC_MOD_L2NIC,
		.cmd = HINIC_MGMT_CMD_HEART_LOST_REPORT,
		.event = HINIC_EVENT_HEARTBEAT_LOST,
	},
	{
		.mod = HINIC_MOD_L2NIC,
		.cmd = HINIC_PORT_CMD_SET_VF_COS,
		.event = HINIC_EVENT_SET_VF_COS,
	},

	/* mgmt event */
	{
		.mod = HINIC_MOD_COMM,
		.cmd = HINIC_MGMT_CMD_FAULT_REPORT,
		.event = HINIC_EVENT_MGMT_FAULT,
	},
	{
		.mod = HINIC_MOD_COMM,
		.cmd = HINIC_MGMT_CMD_WATCHDOG_INFO,
		.event = HINIC_EVENT_MGMT_WATCHDOG,
	},
	{
		.mod = HINIC_MOD_COMM,
		.cmd = HINIC_MGMT_CMD_FMW_ACT_NTC,
		.event = HINIC_EVENT_MGMT_FMW_ACT_NTC,
	},
	{
		.mod = HINIC_MOD_L2NIC,
		.cmd = HINIC_PORT_CMD_MGMT_RESET,
		.event = HINIC_EVENT_MGMT_RESET,
	},
	{
		.mod = HINIC_MOD_COMM,
		.cmd = HINIC_MGMT_CMD_PCIE_DFX_NTC,
		.event = HINIC_EVENT_MGMT_PCIE_DFX,
	},
	{
		.mod = HINIC_MOD_COMM,
		.cmd = HINIC_MGMT_CMD_GET_HOST_INFO,
		.event = HINIC_EVENT_MCTP_HOST_INFO,
	},
	{
		.mod = HINIC_MOD_L2NIC,
		.cmd = HINIC_PORT_CMD_GET_SFP_INFO,
		.event = HINIC_EVENT_SFP_INFO_REPORT,
	},
	{
		.mod = HINIC_MOD_L2NIC,
		.cmd = HINIC_PORT_CMD_GET_SFP_ABS,
		.event = HINIC_EVENT_SFP_ABS_REPORT,
	},
};
+
+static enum hinic_event_cmd __get_event_type(u8 mod, u8 cmd)
+{
+ int idx;
+ int arr_size = ARRAY_SIZE(__event_convert);
+
+ for (idx = 0; idx < arr_size; idx++) {
+ if (__event_convert[idx].mod == mod &&
+ __event_convert[idx].cmd == cmd)
+ return __event_convert[idx].event;
+ }
+
+ return HINIC_EVENT_MAX_TYPE;
+}
+
+bool hinic_mgmt_event_ack_first(u8 mod, u8 cmd)
+{
+ if ((mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_GET_HOST_INFO) ||
+ (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_HEARTBEAT_EVENT))
+ return false;
+
+ if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC ||
+ mod == HINIC_MOD_HILINK)
+ return true;
+
+ return false;
+}
+
#define FAULT_SHOW_STR_LEN 16
/* Log a decoded fault event and update the per-type fault statistics.
 * Only prints; recovery is driven by the event callback in the caller.
 */
static void fault_report_show(struct hinic_hwdev *hwdev,
			      struct hinic_fault_event *event)
{
	/* index: enum fault type / fault level -> human-readable name */
	char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
		"chip", "ucode", "mem rd timeout", "mem wr timeout",
		"reg rd timeout", "reg wr timeout", "phy fault"};
	char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = {
		"fatal", "reset", "flr", "general", "suggestion"};
	char type_str[FAULT_SHOW_STR_LEN + 1];
	char level_str[FAULT_SHOW_STR_LEN + 1];
	u8 level;
	u32 pos, base;
	struct hinic_fault_event_stats *fault;
	u8 node_id;

	sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %d\n",
		hinic_global_func_id(hwdev));

	memset(type_str, 0, FAULT_SHOW_STR_LEN + 1);
	if (event->type < FAULT_TYPE_MAX)
		strscpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN);
	else
		strscpy(type_str, "Unknown", FAULT_SHOW_STR_LEN);

	sdk_err(hwdev->dev_hdl, "Fault type: %d [%s]\n", event->type, type_str);
	sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n",
		event->event.val[0], event->event.val[1], event->event.val[2],
		event->event.val[3]);

	fault = &hwdev->hw_stats.fault_event_stats;

	switch (event->type) {
	case FAULT_TYPE_CHIP:
		memset(level_str, 0, FAULT_SHOW_STR_LEN + 1);
		level = event->event.chip.err_level;
		if (level < FAULT_LEVEL_MAX)
			strscpy(level_str, fault_level[level],
				FAULT_SHOW_STR_LEN);
		else
			strscpy(level_str, "Unknown", FAULT_SHOW_STR_LEN);

		/* NOTE(review): fault_type_stat is only bumped for FLR-level
		 * chip faults here; other levels are counted solely in
		 * chip_fault_stats below — confirm this is intended.
		 */
		if (level == FAULT_LEVEL_SERIOUS_FLR) {
			sdk_err(hwdev->dev_hdl, "err_level: %d [%s], flr func_id: %d\n",
				level, level_str, event->event.chip.func_id);
			atomic_inc(&fault->fault_type_stat[event->type]);
		}
		sdk_err(hwdev->dev_hdl, "module_id: 0x%x, err_type: 0x%x, err_level: %d[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n",
			event->event.chip.node_id,
			event->event.chip.err_type, level, level_str,
			event->event.chip.err_csr_addr,
			event->event.chip.err_csr_value);

		node_id = event->event.chip.node_id;
		atomic_inc(&fault->chip_fault_stats[node_id][level]);

		/* flat index into the per-(node, level, err_type) counters */
		base = event->event.chip.node_id * FAULT_LEVEL_MAX *
			HINIC_CHIP_ERROR_TYPE_MAX;
		pos = base + HINIC_CHIP_ERROR_TYPE_MAX * level +
			event->event.chip.err_type;
		if (pos < HINIC_CHIP_FAULT_SIZE)
			hwdev->chip_fault_stats[pos]++;
		break;
	case FAULT_TYPE_UCODE:
		atomic_inc(&fault->fault_type_stat[event->type]);

		sdk_err(hwdev->dev_hdl, "cause_id: %d, core_id: %d, c_id: %d, epc: 0x%08x\n",
			event->event.ucode.cause_id, event->event.ucode.core_id,
			event->event.ucode.c_id, event->event.ucode.epc);
		break;
	case FAULT_TYPE_MEM_RD_TIMEOUT:
	case FAULT_TYPE_MEM_WR_TIMEOUT:
		atomic_inc(&fault->fault_type_stat[event->type]);

		sdk_err(hwdev->dev_hdl, "err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_index: 0x%08x\n",
			event->event.mem_timeout.err_csr_ctrl,
			event->event.mem_timeout.err_csr_data,
			event->event.mem_timeout.ctrl_tab,
			event->event.mem_timeout.mem_index);
		break;
	case FAULT_TYPE_REG_RD_TIMEOUT:
	case FAULT_TYPE_REG_WR_TIMEOUT:
		atomic_inc(&fault->fault_type_stat[event->type]);
		sdk_err(hwdev->dev_hdl, "err_csr: 0x%08x\n",
			event->event.reg_timeout.err_csr);
		break;
	case FAULT_TYPE_PHY_FAULT:
		atomic_inc(&fault->fault_type_stat[event->type]);
		sdk_err(hwdev->dev_hdl, "op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n",
			event->event.phy_fault.op_type,
			event->event.phy_fault.port_id,
			event->event.phy_fault.dev_ad,
			event->event.phy_fault.csr_addr,
			event->event.phy_fault.op_data);
		break;
	default:
		break;
	}
}
+
+void hinic_migrate_report(void *dev)
+{
+ struct hinic_hwdev *hwdev = (struct hinic_hwdev *)dev;
+ struct hinic_event_info event_info = {0};
+
+ if (!dev)
+ return;
+
+ event_info.type = HINIC_EVENT_INIT_MIGRATE_PF;
+ if (hwdev->event_callback)
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+}
+EXPORT_SYMBOL(hinic_migrate_report);
+
+static void fault_event_handler(struct hinic_hwdev *hwdev, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_cmd_fault_event *fault_event;
+ struct hinic_event_info event_info;
+ u8 fault_level;
+
+ if (in_size != sizeof(*fault_event)) {
+ sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %d, should be %ld\n",
+ in_size, sizeof(*fault_event));
+ return;
+ }
+
+ fault_event = buf_in;
+ fault_report_show(hwdev, &fault_event->event);
+
+ if (fault_event->event.type == HINIC_FAULT_SRC_HW_MGMT_CHIP)
+ fault_level = fault_event->event.event.chip.err_level;
+ else
+ fault_level = FAULT_LEVEL_FATAL;
+
+ if (hwdev->event_callback) {
+ event_info.type = HINIC_EVENT_FAULT;
+ memcpy(&event_info.info, &fault_event->event,
+ sizeof(event_info.info));
+ event_info.info.fault_level = fault_level;
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+ }
+}
+
+static void heartbeat_lost_event_handler(struct hinic_hwdev *hwdev)
+{
+ struct hinic_event_info event_info = {0};
+
+ atomic_inc(&hwdev->hw_stats.heart_lost_stats);
+ sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n",
+ hinic_global_func_id(hwdev));
+
+ if (hwdev->event_callback) {
+ event_info.type = HINIC_EVENT_HEART_LOST;
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+ }
+}
+
/* Handle a link-state change report: update link statistics, refresh port
 * info on link-up (PF only), notify the event callback and propagate the
 * new state to the VFs. The reply status is filled for PFs only.
 */
static void link_status_event_handler(struct hinic_hwdev *hwdev, void *buf_in,
				      u16 in_size, void *buf_out, u16 *out_size)
{
	struct hinic_port_link_status *link_status, *ret_link_status;
	struct hinic_event_info event_info = {0};
	struct hinic_event_link_info *link_info = &event_info.link_info;
	struct nic_port_info port_info = {0};
	int err;

	/* Ignore link change event */
	if (FUNC_FORCE_LINK_UP(hwdev))
		return;

	link_status = buf_in;
	sdk_info(hwdev->dev_hdl, "Link status report received, func_id: %d, status: %d\n",
		 hinic_global_func_id(hwdev), link_status->link);

	if (link_status->link)
		atomic_inc(&hwdev->hw_stats.link_event_stats.link_up_stats);
	else
		atomic_inc(&hwdev->hw_stats.link_event_stats.link_down_stats);

	/* link event reported only after set vport enable */
	if (hinic_func_type(hwdev) != TYPE_VF &&
	    link_status->link == HINIC_EVENT_LINK_UP) {
		err = hinic_get_port_info(hwdev, &port_info);
		if (err) {
			/* best-effort: report the link without port details */
			nic_warn(hwdev->dev_hdl, "Failed to get port info\n");
		} else {
			link_info->valid = 1;
			link_info->port_type = port_info.port_type;
			link_info->autoneg_cap = port_info.autoneg_cap;
			link_info->autoneg_state = port_info.autoneg_state;
			link_info->duplex = port_info.duplex;
			link_info->speed = port_info.speed;
			hinic_refresh_nic_cfg(hwdev, &port_info);
		}
	}

	if (!hwdev->event_callback)
		return;

	event_info.type = link_status->link ?
			HINIC_EVENT_LINK_UP : HINIC_EVENT_LINK_DOWN;

	hwdev->event_callback(hwdev->event_pri_handle, &event_info);
	/* PF additionally forwards the change to its VFs and acks mgmt */
	if (hinic_func_type(hwdev) != TYPE_VF) {
		hinic_notify_all_vfs_link_changed(hwdev, link_status->link);
		ret_link_status = buf_out;
		ret_link_status->status = 0;
		*out_size = sizeof(*ret_link_status);
	}
}
+
/* Handle a port-module event (cable plug/unplug or link error): build a
 * HINIC_EVENT_PORT_MODULE_EVENT for the callback and fill the mgmt reply.
 * On a plug event the cached SFP info for the port is invalidated.
 */
static void module_status_event(struct hinic_hwdev *hwdev,
				enum hinic_event_cmd cmd, void *buf_in,
				u16 in_size, void *buf_out, u16 *out_size)
{
	struct hinic_cable_plug_event *plug_event;
	struct hinic_link_err_event *link_err;
	struct hinic_event_info event_info = {0};
	struct hinic_port_routine_cmd *rt_cmd;
	struct card_node *chip_node = hwdev->chip_node;

	event_info.type = HINIC_EVENT_PORT_MODULE_EVENT;

	if (cmd == HINIC_EVENT_CABLE_PLUG) {
		plug_event = buf_in;

		/* the cached module data is stale once the cable moved */
		if (plug_event->port_id < HINIC_MAX_PORT_ID) {
			rt_cmd = &chip_node->rt_cmd[plug_event->port_id];
			mutex_lock(&chip_node->sfp_mutex);
			rt_cmd->up_send_sfp_abs = false;
			rt_cmd->up_send_sfp_info = false;
			mutex_unlock(&chip_node->sfp_mutex);
		}

		event_info.module_event.type = plug_event->plugged ?
					HINIC_PORT_MODULE_CABLE_PLUGGED :
					HINIC_PORT_MODULE_CABLE_UNPLUGGED;

		/* ack mgmt through buf_out */
		*out_size = sizeof(*plug_event);
		plug_event = buf_out;
		plug_event->status = 0;
	} else if (cmd == HINIC_EVENT_LINK_ERR) {
		link_err = buf_in;

		event_info.module_event.type = HINIC_PORT_MODULE_LINK_ERR;
		event_info.module_event.err_type = link_err->err_type;

		*out_size = sizeof(*link_err);
		link_err = buf_out;
		link_err->status = 0;
	} else {
		sdk_warn(hwdev->dev_hdl, "Unknown module event: %d\n", cmd);
		return;
	}

	if (!hwdev->event_callback)
		return;

	hwdev->event_callback(hwdev->event_pri_handle, &event_info);
}
+
+void hinic_notify_dcb_state_event(struct hinic_hwdev *hwdev,
+ struct hinic_dcb_state *dcb_state)
+{
+ struct hinic_event_info event_info = {0};
+
+ sdk_info(hwdev->dev_hdl, "DCB %s, default cos %d, up2cos %d%d%d%d%d%d%d%d\n",
+ dcb_state->dcb_on ? "on" : "off", dcb_state->default_cos,
+ dcb_state->up_cos[0], dcb_state->up_cos[1],
+ dcb_state->up_cos[2], dcb_state->up_cos[3],
+ dcb_state->up_cos[4], dcb_state->up_cos[5],
+ dcb_state->up_cos[6], dcb_state->up_cos[7]);
+
+ /* Saved in sdk for statefull module */
+ hinic_save_dcb_state(hwdev, dcb_state);
+
+ if (!hwdev->event_callback)
+ return;
+
+ event_info.type = HINIC_EVENT_DCB_STATE_CHANGE;
+ memcpy(&event_info.dcb_state, dcb_state, sizeof(event_info.dcb_state));
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+}
+
/* Dump a mgmt CPU watchdog-timeout report: task/stack state, the 13 saved
 * registers (r0-r12) and up to 1024 bytes of stack, then ack the message.
 */
static void sw_watchdog_timeout_info_show(struct hinic_hwdev *hwdev,
					  void *buf_in, u16 in_size,
					  void *buf_out, u16 *out_size)
{
	struct hinic_mgmt_watchdog_info *watchdog_info;
	u32 *dump_addr, *reg, stack_len, i, j;

	if (in_size != sizeof(*watchdog_info)) {
		sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %ld\n",
			in_size, sizeof(*watchdog_info));
		return;
	}

	watchdog_info = buf_in;

	sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x\n",
		watchdog_info->curr_time_h, watchdog_info->curr_time_l,
		watchdog_info->task_id, watchdog_info->sp);
	sdk_err(hwdev->dev_hdl, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x\n",
		watchdog_info->curr_used, watchdog_info->peak_used,
		watchdog_info->is_overflow, watchdog_info->stack_top,
		watchdog_info->stack_bottom);

	sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr:0x%08x\n",
		watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr);

	sdk_err(hwdev->dev_hdl, "Mgmt register info\n");

	/* print r0-r11, four registers per line */
	for (i = 0; i < 3; i++) {
		reg = watchdog_info->reg + (u64)(u32)(4 * i);
		sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			*(reg), *(reg + 1), *(reg + 2), *(reg + 3));
	}

	/* r12 on its own line */
	sdk_err(hwdev->dev_hdl, "0x%08x\n", watchdog_info->reg[12]);

	/* clamp the stack dump to the 1024-byte data area */
	if (watchdog_info->stack_actlen <= 1024) {
		stack_len = watchdog_info->stack_actlen;
	} else {
		sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n",
			watchdog_info->stack_actlen);
		stack_len = 1024;
	}

	sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16Bytes per line(start from sp)\n");
	for (i = 0; i < (stack_len / 16); i++) {
		dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16)));
		sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			*dump_addr, *(dump_addr + 1), *(dump_addr + 2),
			*(dump_addr + 3));
	}

	/* trailing words that do not fill a 16-byte line */
	for (j = 0; j < ((stack_len % 16) / 4); j++) {
		dump_addr = (u32 *)(watchdog_info->data +
				    ((u64)(u32)(i * 16 + j * 4)));
		sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr);
	}

	/* ack mgmt */
	*out_size = sizeof(*watchdog_info);
	watchdog_info = buf_out;
	watchdog_info->status = 0;
}
+
+static void mgmt_watchdog_timeout_event_handler(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_event_info event_info = { 0 };
+
+ sw_watchdog_timeout_info_show(hwdev, buf_in, in_size,
+ buf_out, out_size);
+
+ if (hwdev->event_callback) {
+ event_info.type = HINIC_EVENT_MGMT_WATCHDOG_EVENT;
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+ }
+}
+
+static void port_sfp_info_event(struct hinic_hwdev *hwdev, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_cmd_get_sfp_qsfp_info *sfp_info = buf_in;
+ struct hinic_port_routine_cmd *rt_cmd;
+ struct card_node *chip_node = hwdev->chip_node;
+
+ if (in_size != sizeof(*sfp_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid sfp info cmd, length: %d, should be %ld\n",
+ in_size, sizeof(*sfp_info));
+ return;
+ }
+
+ if (sfp_info->port_id >= HINIC_MAX_PORT_ID) {
+ sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n",
+ sfp_info->port_id, HINIC_MAX_PORT_ID - 1);
+ return;
+ }
+
+ if (!chip_node->rt_cmd)
+ return;
+
+ rt_cmd = &chip_node->rt_cmd[sfp_info->port_id];
+ mutex_lock(&chip_node->sfp_mutex);
+ memcpy(&rt_cmd->sfp_info, sfp_info, sizeof(rt_cmd->sfp_info));
+ rt_cmd->up_send_sfp_info = true;
+ mutex_unlock(&chip_node->sfp_mutex);
+}
+
+static void port_sfp_abs_event(struct hinic_hwdev *hwdev, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_cmd_get_light_module_abs *sfp_abs = buf_in;
+ struct hinic_port_routine_cmd *rt_cmd;
+ struct card_node *chip_node = hwdev->chip_node;
+
+ if (in_size != sizeof(*sfp_abs)) {
+ sdk_err(hwdev->dev_hdl, "Invalid sfp absent cmd, length: %d, should be %ld\n",
+ in_size, sizeof(*sfp_abs));
+ return;
+ }
+
+ if (sfp_abs->port_id >= HINIC_MAX_PORT_ID) {
+ sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n",
+ sfp_abs->port_id, HINIC_MAX_PORT_ID - 1);
+ return;
+ }
+
+ if (!chip_node->rt_cmd)
+ return;
+
+ rt_cmd = &chip_node->rt_cmd[sfp_abs->port_id];
+ mutex_lock(&chip_node->sfp_mutex);
+ memcpy(&rt_cmd->abs, sfp_abs, sizeof(rt_cmd->abs));
+ rt_cmd->up_send_sfp_abs = true;
+ mutex_unlock(&chip_node->sfp_mutex);
+}
+
+static void mgmt_reset_event_handler(struct hinic_hwdev *hwdev)
+{
+ sdk_info(hwdev->dev_hdl, "Mgmt is reset\n");
+
+ /* mgmt reset only occurred when hot update or Mgmt deadloop,
+ * if Mgmt deadloop, mgmt will report an event with
+ * mod=0, cmd=0x56, and will reported fault to os,
+ * so mgmt reset event don't need to report fault
+ */
+}
+
+static void hinic_fmw_act_ntc_handler(struct hinic_hwdev *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_event_info event_info = {0};
+ struct hinic_fmw_act_ntc *notice_info;
+
+ if (in_size != sizeof(*notice_info)) {
+ sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d, should be %ld\n",
+ in_size, sizeof(*notice_info));
+ return;
+ }
+
+ if (!hwdev->event_callback)
+ return;
+
+ event_info.type = HINIC_EVENT_FMW_ACT_NTC;
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info);
+
+ *out_size = sizeof(*notice_info);
+ notice_info = buf_out;
+ notice_info->status = 0;
+}
+
/* Handle a PCIe DFX notice: ack it, then pull the DFX data from mgmt in
 * 1KB chunks and print each chunk as 32-byte register lines.
 */
static void hinic_pcie_dfx_event_handler(struct hinic_hwdev *hwdev,
					 void *buf_in, u16 in_size,
					 void *buf_out, u16 *out_size)
{
	struct hinic_pcie_dfx_ntc *notice_info = buf_in;
	struct hinic_pcie_dfx_info *dfx_info;
	u16 size = 0;
	u16 cnt = 0;	/* running byte offset used in the printout */
	u32 num = 0;	/* number of 1KB chunks to fetch */
	u32 i, j;
	int err;
	u32 *reg;

	if (in_size != sizeof(*notice_info)) {
		sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d, should be %ld\n",
			in_size, sizeof(*notice_info));
		return;
	}

	dfx_info = kzalloc(sizeof(*dfx_info), GFP_KERNEL);
	if (!dfx_info)
		return;

	/* ack the notice before the (slow) chunked readback */
	((struct hinic_pcie_dfx_ntc *)buf_out)->status = 0;
	*out_size = sizeof(*notice_info);
	num = (u32)(notice_info->len / 1024);
	sdk_info(hwdev->dev_hdl, "INFO LEN: %d\n", notice_info->len);
	sdk_info(hwdev->dev_hdl, "PCIE DFX:\n");
	dfx_info->host_id = 0;
	for (i = 0; i < num; i++) {
		dfx_info->offset = i * MAX_PCIE_DFX_BUF_SIZE;
		if (i == (num - 1))
			dfx_info->last = 1;	/* tell fw this is the final chunk */
		size = sizeof(*dfx_info);
		err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
					     HINIC_MGMT_CMD_PCIE_DFX_GET,
					     dfx_info, sizeof(*dfx_info),
					     dfx_info, &size, 0);
		if (err || dfx_info->status || !size) {
			sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
				"Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x\n",
				err, dfx_info->status, size);
			kfree(dfx_info);
			return;
		}

		/* print the 1KB chunk, eight u32s (32 bytes) per line */
		reg = (u32 *)dfx_info->data;
		for (j = 0; j < 256; j = j + 8) {
			/*lint -save -e661 -e662*/
			sdk_info(hwdev->dev_hdl, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
				 cnt, reg[j], reg[(u32)(j + 1)],
				 reg[(u32)(j + 2)], reg[(u32)(j + 3)],
				 reg[(u32)(j + 4)], reg[(u32)(j + 5)],
				 reg[(u32)(j + 6)], reg[(u32)(j + 7)]);
			/*lint -restore*/
			cnt = cnt + 32;
		}
		memset(dfx_info->data, 0, MAX_PCIE_DFX_BUF_SIZE);
	}
	kfree(dfx_info);
}
+
/* Wire format of the mgmt "get host info over MCTP" request/reply; the
 * reply payload is written into @data by the event callback.
 */
struct hinic_mctp_get_host_info {
	u8 status;	/* reply status, 0 on success */
	u8 version;
	u8 rsvd0[6];

	u8 huawei_cmd;	/* major command requested by the BMC */
	u8 sub_cmd;
	u8 rsvd[2];

	u32 actual_len;	/* number of valid bytes written to data[] */

	u8 data[1024];
};
+
/* Handle an MCTP "get host info" request from mgmt: hand the request to
 * the event callback, which fills mctp_out->data and sets data_len; the
 * reply (including actual_len) goes back through buf_out.
 */
static void hinic_mctp_get_host_info_event_handler(struct hinic_hwdev *hwdev,
						   void *buf_in, u16 in_size,
						   void *buf_out, u16 *out_size)
{
	struct hinic_event_info event_info = {0};
	struct hinic_mctp_get_host_info *mctp_out, *mctp_in;
	struct hinic_mctp_host_info *host_info;

	if (in_size != sizeof(*mctp_in)) {
		sdk_err(hwdev->dev_hdl, "Invalid mgmt mctp info, length: %d, should be %ld\n",
			in_size, sizeof(*mctp_in));
		return;
	}

	*out_size = sizeof(*mctp_out);
	mctp_out = buf_out;
	mctp_out->status = 0;

	/* no consumer registered: report "not initialized" to the requester */
	if (!hwdev->event_callback) {
		mctp_out->status = HINIC_MGMT_STATUS_ERR_INIT;
		return;
	}

	mctp_in = buf_in;
	host_info = &event_info.mctp_info;
	host_info->major_cmd = mctp_in->huawei_cmd;
	host_info->sub_cmd = mctp_in->sub_cmd;
	/* callback writes its answer directly into the reply buffer */
	host_info->data = mctp_out->data;

	event_info.type = HINIC_EVENT_MCTP_GET_HOST_INFO;
	hwdev->event_callback(hwdev->event_pri_handle, &event_info);

	mctp_out->actual_len = host_info->data_len;
}
+
/* FEC mode index -> printable name, indexed by info->fec */
static char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {"RS-FEC", "BASE-FEC", "NO-FEC"};

/* Port type index -> printable name, indexed by info->port_type */
static char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = {
	"Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC",
	"Back plane", "BaseT"
};
+
+static void __print_cable_info(struct hinic_hwdev *hwdev,
+ struct hinic_link_info *info)
+{
+ char tmp_str[CAP_INFO_MAC_LEN] = {0};
+ char tmp_vendor[VENDOR_MAX_LEN] = {0};
+ char *port_type = "Unknown port type";
+ int i;
+ int err = 0;
+
+ if (info->cable_absent) {
+ sdk_info(hwdev->dev_hdl, "Cable unpresent\n");
+ return;
+ }
+
+ if (info->port_type < LINK_PORT_MAX_TYPE)
+ port_type = __hw_to_char_port_type[info->port_type];
+ else
+ sdk_info(hwdev->dev_hdl, "Unknown port type: %u\n",
+ info->port_type);
+ if (info->port_type == LINK_PORT_FIBRE) {
+ if (info->port_sub_type == FIBRE_SUBTYPE_SR)
+ port_type = "Fibre-SR";
+ else if (info->port_sub_type == FIBRE_SUBTYPE_LR)
+ port_type = "Fibre-LR";
+ }
+
+ for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) {
+ if (info->vendor_name[i] == ' ')
+ info->vendor_name[i] = '\0';
+ else
+ break;
+ }
+
+ memcpy(tmp_vendor, info->vendor_name,
+ sizeof(info->vendor_name));
+ err = snprintf(tmp_str, sizeof(tmp_str),
+ "Vendor: %s, %s, length: %um, max_speed: %uGbps",
+ tmp_vendor, port_type, info->cable_length,
+ info->cable_max_speed);
+ if (err <= 0 || err >= CAP_INFO_MAC_LEN) {
+ sdk_err(hwdev->dev_hdl,
+ "Failed snprintf cable vendor info, function return(%d) and dest_len(%d)\n",
+ err, CAP_INFO_MAC_LEN);
+ return;
+ }
+
+ if (info->port_type == LINK_PORT_FIBRE ||
+ info->port_type == LINK_PORT_AOC) {
+ err = snprintf(tmp_str, sizeof(tmp_str),
+ "%s, %s, Temperature: %u", tmp_str,
+ info->sfp_type ? "SFP" : "QSFP",
+ info->cable_temp);
+ if (err <= 0 || err >= CAP_INFO_MAC_LEN) {
+ sdk_err(hwdev->dev_hdl,
+ "Failed snprintf cable Temp, function return(%d) and dest_len(%d)\n",
+ err, CAP_INFO_MAC_LEN);
+ return;
+ }
+
+ if (info->sfp_type) {
+ err = snprintf(tmp_str, sizeof(tmp_str),
+ "%s, rx power: %uuW, tx power: %uuW",
+ tmp_str, info->power[0], info->power[1]);
+ } else {
+ err = snprintf(tmp_str, sizeof(tmp_str),
+ "%s, rx power: %uuw %uuW %uuW %uuW",
+ tmp_str, info->power[0], info->power[1],
+ info->power[2], info->power[3]);
+ }
+ if (err <= 0 || err >= CAP_INFO_MAC_LEN) {
+ sdk_err(hwdev->dev_hdl,
+ "Failed snprintf power info, function return(%d) and dest_len(%d)\n",
+ err, CAP_INFO_MAC_LEN);
+ return;
+ }
+ }
+
+ sdk_info(hwdev->dev_hdl, "Cable information: %s\n",
+ tmp_str);
+}
+
/* Print the TX FFE taps and RX CTLE settings of one hilink serdes lane.
 * FFE tap fields use bit4 as sign and bits 3:0 as magnitude.
 */
static void __hi30_lane_info(struct hinic_hwdev *hwdev,
			     struct hilink_lane *lane)
{
	struct hi30_ffe_data *ffe_data;
	struct hi30_ctle_data *ctle_data;

	ffe_data = (struct hi30_ffe_data *)lane->hi30_ffe;
	ctle_data = (struct hi30_ctle_data *)lane->hi30_ctle;

	sdk_info(hwdev->dev_hdl, "TX_FFE: PRE1=%s%d; PRE2=%s%d; MAIN=%d; POST1=%s%d; POST1X=%s%d\n",
		 (ffe_data->PRE1 & 0x10) ? "-" : "",
		 (int)(ffe_data->PRE1 & 0xf),
		 (ffe_data->PRE2 & 0x10) ? "-" : "",
		 (int)(ffe_data->PRE2 & 0xf),
		 (int)ffe_data->MAIN,
		 (ffe_data->POST1 & 0x10) ? "-" : "",
		 (int)(ffe_data->POST1 & 0xf),
		 (ffe_data->POST2 & 0x10) ? "-" : "",
		 (int)(ffe_data->POST2 & 0xf));
	sdk_info(hwdev->dev_hdl, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u\n",
		 ctle_data->ctlebst[0], ctle_data->ctlebst[1],
		 ctle_data->ctlebst[2], ctle_data->ctlecmband[0],
		 ctle_data->ctlecmband[1], ctle_data->ctlecmband[2],
		 ctle_data->ctlermband[0], ctle_data->ctlermband[1],
		 ctle_data->ctlermband[2], ctle_data->ctleza[0],
		 ctle_data->ctleza[1], ctle_data->ctleza[2]);
}
+
+static void __print_hi30_status(struct hinic_hwdev *hwdev,
+ struct hinic_link_info *info)
+{
+ struct hilink_lane *lane;
+ int lane_used_num = 0, i;
+
+ for (i = 0; i < HILINK_MAX_LANE; i++) {
+ lane = (struct hilink_lane *)(info->lane2 + i * sizeof(*lane));
+ if (!lane->lane_used)
+ continue;
+
+ __hi30_lane_info(hwdev, lane);
+ lane_used_num++;
+ }
+
+ /* in new firmware, all lane info setted in lane2 */
+ if (lane_used_num)
+ return;
+
+ /* compatible old firmware */
+ __hi30_lane_info(hwdev, (struct hilink_lane *)info->lane1);
+}
+
+static void __print_link_info(struct hinic_hwdev *hwdev,
+ struct hinic_link_info *info,
+ enum hilink_info_print_event type)
+{
+ char *fec = "None";
+
+ if (info->fec < HILINK_FEC_MAX_TYPE)
+ fec = __hw_to_char_fec[info->fec];
+ else
+ sdk_info(hwdev->dev_hdl, "Unknown fec type: %u\n",
+ info->fec);
+
+ if (type == HILINK_EVENT_LINK_UP || !info->an_state) {
+ sdk_info(hwdev->dev_hdl, "Link information: speed %dGbps, %s, autoneg %s\n",
+ info->speed, fec, info->an_state ? "on" : "off");
+ } else {
+ sdk_info(hwdev->dev_hdl, "Link information: antoneg: %s\n",
+ info->an_state ? "on" : "off");
+ }
+}
+
/* hilink_info_print_event -> log suffix; index 0 is unused */
static char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = {
	"", "link up", "link down", "cable plugged"
};
+
+static void print_hilink_info(struct hinic_hwdev *hwdev,
+ enum hilink_info_print_event type,
+ struct hinic_link_info *info)
+{
+ __print_cable_info(hwdev, info);
+
+ __print_link_info(hwdev, info, type);
+
+ __print_hi30_status(hwdev, info);
+
+ if (type == HILINK_EVENT_LINK_UP)
+ return;
+
+ if (type == HILINK_EVENT_CABLE_PLUGGED) {
+ sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u\n",
+ info->alos, info->rx_los);
+ return;
+ }
+
+ sdk_info(hwdev->dev_hdl, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug info reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x\n",
+ info->pma_status == 1 ? "off" : "on",
+ info->mac_tx_en ? "enable" : "disable",
+ info->mac_rx_en ? "enable" : "disable", info->pma_dbg_info_reg,
+ info->pma_signal_ok_reg, info->rf_lf_status_reg);
+ sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x, PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x\n",
+ info->alos, info->rx_los, info->pcs_err_blk_cnt_reg,
+ info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt);
+}
+
/* Handle an unsolicited hilink info report: validate size and event type,
 * ack mgmt, and print the full link report.
 * Returns 0 on success, -EINVAL on a malformed message.
 */
static int hinic_print_hilink_info(struct hinic_hwdev *hwdev, void *buf_in,
				   u16 in_size, void *buf_out, u16 *out_size)
{
	struct hinic_hilink_link_info *hilink_info = buf_in;
	struct hinic_link_info *info;
	enum hilink_info_print_event type;

	if (in_size != sizeof(*hilink_info)) {
		sdk_err(hwdev->dev_hdl, "Invalid hilink info message size %d, should be %ld\n",
			in_size, sizeof(*hilink_info));
		return -EINVAL;
	}

	/* ack mgmt before validating the event type */
	((struct hinic_hilink_link_info *)buf_out)->status = 0;
	*out_size = sizeof(*hilink_info);

	info = &hilink_info->info;
	type = hilink_info->info_type;

	if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) {
		sdk_info(hwdev->dev_hdl, "Invalid hilink info report, type: %d\n",
			 type);
		return -EINVAL;
	}

	sdk_info(hwdev->dev_hdl, "Hilink info report after %s\n",
		 hilink_info_report_type[type]);

	print_hilink_info(hwdev, type, info);

	return 0;
}
+
+int hinic_hilink_info_show(struct hinic_hwdev *hwdev)
+{
+ struct hinic_link_info hilink_info = { {0} };
+ int err;
+
+ err = hinic_get_hilink_link_info(hwdev, &hilink_info);
+ if (err) {
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED)
+ sdk_info(hwdev->dev_hdl, "Unsupport to get hilink info\n");
+ return err;
+ }
+
+ if (hilink_info.cable_absent) {
+ sdk_info(hwdev->dev_hdl, "Cable unpresent\n");
+ return 0;
+ }
+
+ sdk_info(hwdev->dev_hdl, "Current state of hilink info:\n");
+ print_hilink_info(hwdev, HILINK_EVENT_MAX_TYPE, &hilink_info);
+
+ return 0;
+}
+
/* public process for this event:
 * pf link change event
 * pf heart lost event ,TBD
 * pf fault report event
 * vf link change event
 * vf heart lost event, TBD
 * vf fault report event, TBD
 */
/* Central dispatcher: route an internal hinic_event_cmd to its handler.
 * *out_size is zeroed first; handlers that ack mgmt set it themselves.
 */
static void _event_handler(struct hinic_hwdev *hwdev, enum hinic_event_cmd cmd,
			   void *buf_in, u16 in_size,
			   void *buf_out, u16 *out_size)
{
	struct hinic_vf_dcb_state *vf_dcb;

	if (!hwdev)
		return;

	*out_size = 0;

	switch (cmd) {
	case HINIC_EVENT_LINK_STATUS_CHANGE:
		link_status_event_handler(hwdev, buf_in, in_size, buf_out,
					  out_size);
		break;

	case HINIC_EVENT_CABLE_PLUG:
	case HINIC_EVENT_LINK_ERR:
		module_status_event(hwdev, cmd, buf_in, in_size, buf_out,
				    out_size);
		break;

	case HINIC_EVENT_HILINK_INFO:
		hinic_print_hilink_info(hwdev, buf_in, in_size, buf_out,
					out_size);
		break;

	case HINIC_EVENT_MGMT_FAULT:
		fault_event_handler(hwdev, buf_in, in_size, buf_out, out_size);
		break;

	case HINIC_EVENT_HEARTBEAT_LOST:
		heartbeat_lost_event_handler(hwdev);
		break;

	case HINIC_EVENT_SET_VF_COS:
		vf_dcb = buf_in;
		if (!vf_dcb)
			break;

		hinic_notify_dcb_state_event(hwdev, &vf_dcb->state);

		break;

	case HINIC_EVENT_MGMT_WATCHDOG:
		mgmt_watchdog_timeout_event_handler(hwdev, buf_in, in_size,
						    buf_out, out_size);
		break;

	case HINIC_EVENT_MGMT_RESET:
		mgmt_reset_event_handler(hwdev);
		break;

	case HINIC_EVENT_MGMT_FMW_ACT_NTC:
		hinic_fmw_act_ntc_handler(hwdev, buf_in, in_size, buf_out,
					  out_size);

		break;

	case HINIC_EVENT_MGMT_PCIE_DFX:
		hinic_pcie_dfx_event_handler(hwdev, buf_in, in_size, buf_out,
					     out_size);
		break;

	case HINIC_EVENT_MCTP_HOST_INFO:
		hinic_mctp_get_host_info_event_handler(hwdev, buf_in, in_size,
						       buf_out, out_size);
		break;

	case HINIC_EVENT_SFP_INFO_REPORT:
		port_sfp_info_event(hwdev, buf_in, in_size, buf_out, out_size);
		break;

	case HINIC_EVENT_SFP_ABS_REPORT:
		port_sfp_abs_event(hwdev, buf_in, in_size, buf_out, out_size);
		break;

	default:
		sdk_warn(hwdev->dev_hdl, "Unsupported event %d to process\n",
			 cmd);
		break;
	}
}
+
+/* vf link change event
+ * vf fault report event, TBD
+ *
+ * VF-side mailbox callback for L2NIC events: decodes the raw command into
+ * a hinic_event_cmd and forwards to _event_handler.
+ * NOTE(review): returns -EINVAL on an unknown command, while the COMM
+ * sibling below returns -EFAULT — confirm whether this asymmetry is
+ * intentional.
+ */
+static int vf_nic_event_handler(void *hwdev, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+
+{
+ enum hinic_event_cmd type = __get_event_type(HINIC_MOD_L2NIC, cmd);
+
+ if (type == HINIC_EVENT_MAX_TYPE) {
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupport L2NIC event: cmd %d\n", cmd);
+ *out_size = 0;
+ return -EINVAL;
+ }
+
+ _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+
+ return 0;
+}
+
+/* VF-side mailbox callback for COMM module events; decodes the command
+ * and forwards to _event_handler. Returns -EFAULT for unknown commands.
+ */
+static int vf_comm_event_handler(void *hwdev, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+
+{
+ enum hinic_event_cmd type = __get_event_type(HINIC_MOD_COMM, cmd);
+
+ if (type == HINIC_EVENT_MAX_TYPE) {
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupport COMM event: cmd %d\n", cmd);
+ *out_size = 0;
+ return -EFAULT;
+ }
+
+ _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+
+ return 0;
+}
+
+/* pf link change event
+ * PF-side mgmt callback for L2NIC events (void return: mgmt callbacks
+ * report nothing back; errors are only logged).
+ */
+static void pf_nic_event_handler(void *hwdev, void *pri_handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ enum hinic_event_cmd type = __get_event_type(HINIC_MOD_L2NIC, cmd);
+
+ if (type == HINIC_EVENT_MAX_TYPE) {
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupport L2NIC event: cmd %d\n", cmd);
+ *out_size = 0;
+ return;
+ }
+
+ _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+}
+
+/* PF-side mgmt callback for HILINK module events */
+static void pf_hilink_event_handler(void *hwdev, void *pri_handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ enum hinic_event_cmd type = __get_event_type(HINIC_MOD_HILINK, cmd);
+
+ if (type == HINIC_EVENT_MAX_TYPE) {
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupport HILINK event: cmd %d\n", cmd);
+ *out_size = 0;
+ return;
+ }
+
+ _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size);
+}
+
+/* pf fault report event
+ * The five wrappers below adapt the mgmt self-command callback signature
+ * to _event_handler, each binding one fixed event type.
+ */
+static void pf_fault_event_handler(void *hwdev,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MGMT_FAULT, buf_in,
+ in_size, buf_out, out_size);
+}
+
+/* mgmt CPU watchdog timeout report */
+static void mgmt_watchdog_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MGMT_WATCHDOG, buf_in,
+ in_size, buf_out, out_size);
+}
+
+/* firmware activation notice */
+static void mgmt_fmw_act_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MGMT_FMW_ACT_NTC, buf_in,
+ in_size, buf_out, out_size);
+}
+
+/* PCIe DFX (diagnostics) notice */
+static void mgmt_pcie_dfx_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MGMT_PCIE_DFX, buf_in,
+ in_size, buf_out, out_size);
+}
+
+/* MCTP host information request */
+static void mgmt_get_mctp_event_handler(void *hwdev, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ _event_handler(hwdev, HINIC_EVENT_MCTP_HOST_INFO, buf_in,
+ in_size, buf_out, out_size);
+}
+
+/* Register all PF-side mgmt event callbacks (module callbacks for L2NIC
+ * and HILINK, plus the five mgmt self-commands). Only done once the mgmt
+ * channel itself is initialized.
+ */
+static void pf_event_register(struct hinic_hwdev *hwdev)
+{
+ if (hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) {
+ hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_L2NIC,
+ hwdev, pf_nic_event_handler);
+ hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_HILINK,
+ hwdev,
+ pf_hilink_event_handler);
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_CMD_FAULT_REPORT,
+ pf_fault_event_handler);
+
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_CMD_WATCHDOG_INFO,
+ mgmt_watchdog_event_handler);
+
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_CMD_FMW_ACT_NTC,
+ mgmt_fmw_act_event_handler);
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_CMD_PCIE_DFX_NTC,
+ mgmt_pcie_dfx_event_handler);
+ hinic_comm_recv_mgmt_self_cmd_reg(hwdev,
+ HINIC_MGMT_CMD_GET_HOST_INFO,
+ mgmt_get_mctp_event_handler);
+ }
+}
+
+/**
+ * hinic_event_register - install the upper-layer event callback
+ * @dev: hwdev pointer
+ * @pri_handle: opaque context handed back to @callback
+ * @callback: upper-layer event handler
+ *
+ * PF/PPF functions hook the mgmt channel; VFs hook the PF mailbox instead.
+ */
+void hinic_event_register(void *dev, void *pri_handle,
+ hinic_event_handler callback)
+{
+ struct hinic_hwdev *hwdev = dev;
+
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for register event\n");
+ return;
+ }
+
+ hwdev->event_callback = callback;
+ hwdev->event_pri_handle = pri_handle;
+
+ if (hinic_func_type(hwdev) != TYPE_VF) {
+ pf_event_register(hwdev);
+ } else {
+ hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ vf_nic_event_handler);
+ hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_COMM,
+ vf_comm_event_handler);
+ }
+}
+
+/**
+ * hinic_event_unregister - remove the upper-layer event callback
+ * @dev: hwdev pointer
+ *
+ * Mirror of hinic_event_register: clears the stored callback/context and
+ * tears down the PF mgmt (or VF mailbox) handlers registered there.
+ */
+void hinic_event_unregister(void *dev)
+{
+ struct hinic_hwdev *hwdev = dev;
+
+ /* Guard against NULL, consistent with hinic_event_register */
+ if (!dev) {
+ pr_err("Hwdev pointer is NULL for unregister event\n");
+ return;
+ }
+
+ hwdev->event_callback = NULL;
+ hwdev->event_pri_handle = NULL;
+
+ if (hinic_func_type(hwdev) != TYPE_VF) {
+ hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_L2NIC);
+ hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_HILINK);
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_CMD_FAULT_REPORT);
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_CMD_WATCHDOG_INFO);
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_CMD_FMW_ACT_NTC);
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_CMD_PCIE_DFX_NTC);
+ hinic_comm_recv_up_self_cmd_unreg(hwdev,
+ HINIC_MGMT_CMD_GET_HOST_INFO);
+ } else {
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_COMM);
+ }
+}
+
+/* 0 - heartbeat lost, 1 - normal
+ * Reads ATTR1 to sample the mgmt CPU init/heartbeat status; a read of
+ * all-ones means the PCIe link itself is gone, which is handled as a
+ * separate, terminal condition.
+ */
+static u8 hinic_get_heartbeat_status(struct hinic_hwdev *hwdev)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u32 attr1;
+
+ /* surprise remove should be set 1 */
+ if (!hinic_get_chip_present_flag(hwdev))
+ return 1;
+
+ attr1 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR1_ADDR);
+ if (attr1 == HINIC_PCIE_LINK_DOWN) {
+ sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n");
+ hinic_set_chip_absent(hwdev);
+ hinic_force_complete_all(hwdev);
+ /* should notify chiperr to pangea when detecting pcie link down */
+ return 1;
+ }
+
+ return HINIC_AF1_GET(attr1, MGMT_INIT_STATUS);
+}
+
+/* Workqueue body: report HEARTBEAT_LOST through the common event path.
+ * A single local is reused for both buf-size out-parameters since the
+ * heartbeat event carries no payload.
+ */
+static void hinic_heartbeat_event_handler(struct work_struct *work)
+{
+ struct hinic_hwdev *hwdev =
+ container_of(work, struct hinic_hwdev, timer_work);
+ u16 out = 0;
+
+ _event_handler(hwdev, HINIC_EVENT_HEARTBEAT_LOST,
+ NULL, 0, &out, &out);
+}
+
+/* Periodic timer: on heartbeat loss, latch the flag and defer reporting
+ * to process context; otherwise re-arm for the next period. Note the
+ * timer is intentionally NOT re-armed once loss is detected.
+ */
+static void hinic_heartbeat_timer_handler(struct timer_list *t)
+{
+ struct hinic_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer);
+
+ if (!hinic_get_heartbeat_status(hwdev)) {
+ hwdev->heartbeat_lost = 1;
+ queue_work(hwdev->workq, &hwdev->timer_work);
+ } else {
+ mod_timer(&hwdev->heartbeat_timer,
+ jiffies + msecs_to_jiffies(HINIC_HEARTBEAT_PERIOD));
+ }
+}
+
+/**
+ * hinic_init_heartbeat - arm the heartbeat timer for this device
+ * @hwdev: hw device
+ *
+ * The work item is initialized BEFORE the timer is armed: the timer
+ * callback may queue_work() as soon as it fires, so initializing the
+ * work afterwards (as the previous ordering did) leaves a narrow window
+ * where an uninitialized work_struct could be queued.
+ */
+void hinic_init_heartbeat(struct hinic_hwdev *hwdev)
+{
+ INIT_WORK(&hwdev->timer_work, hinic_heartbeat_event_handler);
+
+ timer_setup(&hwdev->heartbeat_timer, hinic_heartbeat_timer_handler, 0);
+
+ hwdev->heartbeat_timer.expires =
+ jiffies + msecs_to_jiffies(HINIC_HEARTBEAT_START_EXPIRE);
+
+ add_timer(&hwdev->heartbeat_timer);
+}
+
+/* Stop the heartbeat timer, waiting for a running callback to finish.
+ * NOTE(review): the timer_work item is flushed by workqueue teardown
+ * elsewhere — confirm against the caller's shutdown order.
+ */
+void hinic_destroy_heartbeat(struct hinic_hwdev *hwdev)
+{
+ del_timer_sync(&hwdev->heartbeat_timer);
+}
+
+/**
+ * hinic_nic_sw_aeqe_handler - handle a software AEQ element from ucode
+ * @handle: hwdev pointer
+ * @event: ucode event type
+ * @data: raw event payload (logged only)
+ *
+ * Counts known fatal ucode errors and maps them to FAULT_LEVEL_FATAL;
+ * unknown events are logged and reported as FAULT_LEVEL_MAX.
+ */
+u8 hinic_nic_sw_aeqe_handler(void *handle, u8 event, u64 data)
+{
+ struct hinic_hwdev *hwdev = (struct hinic_hwdev *)handle;
+ u8 event_level = FAULT_LEVEL_MAX;
+
+ switch (event) {
+ case HINIC_INTERNAL_TSO_FATAL_ERROR:
+ case HINIC_INTERNAL_LRO_FATAL_ERROR:
+ case HINIC_INTERNAL_TX_FATAL_ERROR:
+ case HINIC_INTERNAL_RX_FATAL_ERROR:
+ case HINIC_INTERNAL_OTHER_FATAL_ERROR:
+ atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[event]);
+ sdk_err(hwdev->dev_hdl, "SW aeqe event type: 0x%x, data: 0x%llx\n",
+ event, data);
+ event_level = FAULT_LEVEL_FATAL;
+ break;
+ default:
+ sdk_err(hwdev->dev_hdl, "Unsupported sw event %d to process\n",
+ event);
+ }
+
+ return event_level;
+}
+
+/* Wire format for HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET; the leading
+ * status/version/rsvd0 triplet is the common mgmt message header.
+ */
+struct hinic_fast_recycled_mode {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 fast_recycled_mode; /* 1: enable fast recycle, available
+ * in dpdk mode,
+ * 0: normal mode, available in kernel
+ * nic mode
+ */
+ u8 rsvd1;
+};
+
+/**
+ * hinic_enable_fast_recycle - switch the function's buffer recycle mode
+ * @hwdev: hw device
+ * @enable: true for fast recycle (DPDK mode), false for normal mode
+ *
+ * Return: 0 on success, -EINVAL on bad input, -EFAULT on mgmt failure.
+ */
+int hinic_enable_fast_recycle(void *hwdev, bool enable)
+{
+ struct hinic_fast_recycled_mode fast_recycled_mode = {0};
+ u16 out_size = sizeof(fast_recycled_mode);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &fast_recycled_mode.func_id);
+ if (err)
+ return err;
+
+ fast_recycled_mode.fast_recycled_mode = enable ? 1 : 0;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET,
+ &fast_recycled_mode,
+ sizeof(fast_recycled_mode),
+ &fast_recycled_mode, &out_size, 0);
+ if (err || fast_recycled_mode.status || !out_size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set recycle mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, fast_recycled_mode.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Clear the PCIe strict-ordering (SO/RO replace) bit if it is currently
+ * set, forcing HINIC_DISABLE_ORDER. Read-modify-write on a global chip
+ * register; no-op when ordering is already disabled.
+ */
+void hinic_set_pcie_order_cfg(void *handle)
+{
+ struct hinic_hwdev *hwdev = handle;
+ u32 val;
+
+ if (!hwdev)
+ return;
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_GLB_DMA_SO_RO_REPLACE_ADDR);
+
+ if (HINIC_GLB_DMA_SO_RO_GET(val, SO_RO_CFG)) {
+ val = HINIC_GLB_DMA_SO_R0_CLEAR(val, SO_RO_CFG);
+ val |= HINIC_GLB_DMA_SO_R0_SET(HINIC_DISABLE_ORDER, SO_RO_CFG);
+ hinic_hwif_write_reg(hwdev->hwif,
+ HINIC_GLB_DMA_SO_RO_REPLACE_ADDR, val);
+ }
+}
+
+/**
+ * _set_led_status - common worker to set or reset a port LED
+ * @hwdev: hw device
+ * @port: physical port id
+ * @type: LED to act on (invalid type used with @reset = 1)
+ * @mode: blink/steady mode (invalid mode used with @reset = 1)
+ * @reset: 1 to restore default LED behavior, 0 to apply @type/@mode
+ *
+ * Return: 0 on success, -EFAULT on mgmt failure.
+ */
+int _set_led_status(struct hinic_hwdev *hwdev, u8 port,
+ enum hinic_led_type type,
+ enum hinic_led_mode mode, u8 reset)
+{
+ struct hinic_led_info led_info = {0};
+ u16 out_size = sizeof(led_info);
+ int err;
+
+ led_info.port = port;
+ led_info.reset = reset;
+
+ led_info.type = type;
+ led_info.mode = mode;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_SET_LED_STATUS,
+ &led_info, sizeof(led_info),
+ &led_info, &out_size, 0);
+ if (err || led_info.status || !out_size) {
+ sdk_err(hwdev->dev_hdl, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, led_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_set_led_status - set a port LED to the given type/mode
+ * @hwdev: hw device
+ * @port: physical port id
+ * @type: LED to act on
+ * @mode: blink/steady mode
+ *
+ * Return: 0 on success, -EFAULT on bad input or mgmt failure.
+ */
+int hinic_set_led_status(void *hwdev, u8 port, enum hinic_led_type type,
+ enum hinic_led_mode mode)
+{
+ if (!hwdev)
+ return -EFAULT;
+
+ /* reset = 0: apply the requested state rather than restoring default */
+ return _set_led_status(hwdev, port, type, mode, 0);
+}
+
+/**
+ * hinic_reset_led_status - restore a port LED to its default behavior
+ * @hwdev: hw device
+ * @port: physical port id
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int hinic_reset_led_status(void *hwdev, u8 port)
+{
+ int err;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ /* reset = 1: type/mode are ignored by firmware, pass "invalid" */
+ err = _set_led_status(hwdev, port, HINIC_LED_TYPE_INVALID,
+ HINIC_LED_MODE_INVALID, 1);
+ if (err) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to reset led status\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * hinic_get_board_info - query board information from the mgmt CPU
+ * @hwdev: hw device
+ * @info: output buffer for the board information
+ *
+ * Return: 0 on success, -EINVAL on bad input, -EFAULT on mgmt failure.
+ */
+int hinic_get_board_info(void *hwdev, struct hinic_board_info *info)
+{
+ struct hinic_comm_board_info board_info = {0};
+ u16 out_size = sizeof(board_info);
+ int err;
+
+ if (!hwdev || !info)
+ return -EINVAL;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_GET_BOARD_INFO,
+ &board_info, sizeof(board_info),
+ &board_info, &out_size, 0);
+ if (err || board_info.status || !out_size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, board_info.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(info, &board_info.info, sizeof(*info));
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_board_info);
+
+/**
+ * hinic_get_phy_init_status - query PHY initialization progress
+ * @hwdev: hw device
+ * @init_status: output, current PHY init state
+ *
+ * A firmware status of HINIC_MGMT_CMD_UNSUPPORTED is not treated as an
+ * error here; it is propagated in the return value for the caller to
+ * interpret (see hinic_phy_init_status_judge).
+ *
+ * Return: firmware status on success, -EINVAL/-EFAULT on failure.
+ */
+int hinic_get_phy_init_status(void *hwdev,
+ enum phy_init_status_type *init_status)
+{
+ struct hinic_phy_init_status phy_info = {0};
+ u16 out_size = sizeof(phy_info);
+ int err;
+
+ if (!hwdev || !init_status)
+ return -EINVAL;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_GET_PHY_INIT_STATUS,
+ &phy_info, sizeof(phy_info),
+ &phy_info, &out_size, 0);
+ if ((phy_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ phy_info.status) || err || !out_size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get phy info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, phy_info.status, out_size);
+ return -EFAULT;
+ }
+
+ *init_status = phy_info.init_status;
+
+ return phy_info.status;
+}
+
+/**
+ * hinic_phy_init_status_judge - wait for PHY initialization to settle
+ * @hwdev: hw device
+ *
+ * Polls the PHY init state every 250 ms until success/nonsupport (0),
+ * failure (-EIO), or PHY_DOING_INIT_TIMEOUT elapses (-ETIMEDOUT).
+ * Boards without a PHY return 0 immediately.
+ */
+int hinic_phy_init_status_judge(void *hwdev)
+{
+ enum phy_init_status_type init_status;
+ int ret;
+ unsigned long end;
+
+ /* It's not a phy, so don't judge phy status */
+ if (!HINIC_BOARD_IS_PHY((struct hinic_hwdev *)hwdev))
+ return 0;
+
+ end = jiffies + msecs_to_jiffies(PHY_DOING_INIT_TIMEOUT);
+ do {
+ ret = hinic_get_phy_init_status(hwdev, &init_status);
+ if (ret == HINIC_MGMT_CMD_UNSUPPORTED)
+ return 0;
+ else if (ret)
+ return -EFAULT;
+
+ switch (init_status) {
+ case PHY_INIT_SUCCESS:
+ sdk_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Phy init is success\n");
+ return 0;
+ case PHY_NONSUPPORT:
+ sdk_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Phy init is nonsupport\n");
+ return 0;
+ case PHY_INIT_FAIL:
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Phy init is failed\n");
+ return -EIO;
+ case PHY_INIT_DOING:
+ msleep(250);
+ break;
+ default:
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Phy init is invalid, init_status: %d\n",
+ init_status);
+ return -EINVAL;
+ }
+ } while (time_before(jiffies, end));
+
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Phy init is timeout\n");
+
+ return -ETIMEDOUT;
+}
+
+/* Record the mgmt channel state in a reserved chip scratch register.
+ * Only meaningful on PFs that support DFX registers; silently a no-op
+ * otherwise.
+ */
+static void hinic_set_mgmt_channel_status(void *handle, bool state)
+{
+ struct hinic_hwdev *hwdev = handle;
+ u32 val;
+
+ if (!hwdev || hinic_func_type(hwdev) == TYPE_VF ||
+ !(hwdev->feature_cap & HINIC_FUNC_SUPP_DFX_REG))
+ return;
+
+ val = hinic_hwif_read_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR);
+ val = HINIC_CLEAR_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
+ val |= HINIC_SET_MGMT_CHANNEL_STATUS((u32)state, MGMT_CHANNEL_STATUS);
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR, val);
+}
+
+/* Read back the mgmt channel state latched by
+ * hinic_set_mgmt_channel_status. A NULL handle is conservatively
+ * reported as "blocked" (true); VFs and PFs without DFX registers
+ * always report false.
+ */
+int hinic_get_mgmt_channel_status(void *handle)
+{
+ struct hinic_hwdev *hwdev = handle;
+ u32 val;
+
+ if (!hwdev)
+ return true;
+
+ if (hinic_func_type(hwdev) == TYPE_VF ||
+ !(hwdev->feature_cap & HINIC_FUNC_SUPP_DFX_REG))
+ return false;
+
+ val = hinic_hwif_read_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR);
+
+ return HINIC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
+}
+
+/* Re-open the mgmt channel after a firmware update completes.
+ * If the response indicates an update is still in progress (the
+ * ACTIVE_STATUS bit is set with a zero status), the bit is cleared in
+ * the response and the channel is left untouched.
+ */
+static void hinic_enable_mgmt_channel(void *hwdev, void *buf_out)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_update_active *active_info = buf_out;
+
+ if (!active_info || hinic_func_type(hwdev) == TYPE_VF ||
+ !(dev->feature_cap & HINIC_FUNC_SUPP_DFX_REG))
+ return;
+
+ if (!active_info->status &&
+ (active_info->update_status & HINIC_ACTIVE_STATUS_MASK)) {
+ active_info->update_status &= HINIC_ACTIVE_STATUS_CLEAR;
+ return;
+ }
+
+ /* false == channel usable again */
+ hinic_set_mgmt_channel_status(hwdev, false);
+}
+
+/**
+ * hinic_get_bios_pf_bw_limit - read the BIOS-configured PF bandwidth limit
+ * @hwdev: hw device
+ * @pf_bw_limit: output, limit as a percentage (0-100)
+ *
+ * VFs and functions without rate-limit support return 0 without touching
+ * @pf_bw_limit. The returned record is validated against a fixed
+ * signature before its payload is trusted.
+ *
+ * Return: 0 on success, -EINVAL/-EIO on failure.
+ */
+int hinic_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_bios_cfg_cmd cfg = {0};
+ u16 out_size = sizeof(cfg);
+ u16 func_id;
+ int err;
+
+ if (!hwdev || !pf_bw_limit)
+ return -EINVAL;
+
+ if (HINIC_FUNC_TYPE(dev) == TYPE_VF ||
+ !FUNC_SUPPORT_RATE_LIMIT(hwdev))
+ return 0;
+
+ err = hinic_global_func_id_get(hwdev, &func_id);
+ if (err)
+ return err;
+
+ cfg.func_valid = 1;
+ cfg.func_idx = (u8)func_id;
+
+ cfg.op_code = HINIC_BIOS_CFG_GET | HINIC_BIOS_CFG_PF_BW_LIMIT;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_BIOS_NV_DATA_MGMT,
+ &cfg, sizeof(cfg),
+ &cfg, &out_size, 0);
+ if (err || cfg.status || !out_size) {
+ sdk_err(dev->dev_hdl, "Failed to get bios pf bandwidth limit, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, cfg.status, out_size);
+ return -EIO;
+ }
+
+ /* Check data is valid or not: fixed magic marking a valid NV record */
+ if (cfg.signature != 0x19e51822) {
+ sdk_err(dev->dev_hdl, "Invalid bios configuration data, signature: 0x%x\n",
+ cfg.signature);
+ return -EINVAL;
+ }
+
+ if (cfg.pf_bw_limit > 100) {
+ sdk_err(dev->dev_hdl, "Invalid bios cfg pf bandwidth limit: %d\n",
+ cfg.pf_bw_limit);
+ return -EINVAL;
+ }
+
+ *pf_bw_limit = cfg.pf_bw_limit;
+
+ return 0;
+}
+
+/**
+ * hinic_get_ppf_status - query whether the PPF of this card is alive
+ * @hwdev: hw device
+ *
+ * PFs read the cached state from the card node directly; VFs must ask
+ * their parent PF over the mailbox.
+ *
+ * Return: true if the PPF is up, false otherwise or on any error.
+ */
+bool hinic_get_ppf_status(void *hwdev)
+{
+ struct hinic_ppf_state ppf_state = {0};
+ struct hinic_hwdev *dev = hwdev;
+ struct card_node *chip_node;
+ u16 out_size = sizeof(ppf_state);
+ int err;
+
+ if (!hwdev)
+ return false;
+
+ chip_node = (struct card_node *)dev->chip_node;
+
+ if (!HINIC_IS_VF(dev))
+ return chip_node->ppf_state;
+
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_GET_PPF_STATE,
+ &ppf_state, sizeof(ppf_state),
+ &ppf_state, &out_size, 0);
+ if (err || ppf_state.status || !out_size) {
+ sdk_err(dev->dev_hdl, "Failed to get ppf state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, ppf_state.status, out_size);
+ return false;
+ }
+
+ return (bool)ppf_state.ppf_state;
+}
+
+/* Mark the function as entering teardown so other paths can check
+ * HINIC_HWDEV_FUNC_DEINIT in func_state.
+ */
+void hinic_set_func_deinit_flag(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+
+ set_bit(HINIC_HWDEV_FUNC_DEINIT, &dev->func_state);
+}
+
+/**
+ * hinic_get_hw_pf_infos - fetch per-PF hardware information
+ * @hwdev: hw device
+ * @infos: output buffer, filled only when firmware reports success
+ *
+ * A firmware status of HINIC_MGMT_CMD_UNSUPPORTED is passed through in
+ * the return value rather than treated as a hard failure.
+ *
+ * Return: firmware status on success, -EINVAL/-EFAULT on failure.
+ */
+int hinic_get_hw_pf_infos(void *hwdev, struct hinic_hw_pf_infos *infos)
+{
+ struct hinic_hw_pf_infos_cmd pf_infos = {0};
+ u16 out_size = sizeof(pf_infos);
+ int err;
+
+ if (!hwdev || !infos)
+ return -EINVAL;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_GET_HW_PF_INFOS,
+ &pf_infos, sizeof(pf_infos),
+ &pf_infos, &out_size, 0);
+ if ((pf_infos.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ pf_infos.status) || err || !out_size) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get hw pf information, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pf_infos.status, out_size);
+ return -EFAULT;
+ }
+
+ if (!pf_infos.status)
+ memcpy(infos, &pf_infos.infos, sizeof(*infos));
+
+ return pf_infos.status;
+}
+EXPORT_SYMBOL(hinic_get_hw_pf_infos);
+
+/**
+ * hinic_set_ip_check - toggle DIP/SIP checking on the IPSU channels
+ * @hwdev: hw device
+ * @ip_check_ctl: true to enable the check mask, false to clear it
+ *
+ * Read-modify-write of each IPSU channel register via the API chain;
+ * registers are big-endian on the wire, hence the byte swaps.
+ * NOTE(review): the loop bound uses "<= HINIC_IPSU_CHANNEL_NUM", i.e.
+ * NUM + 1 channels are touched — confirm this off-by-one-looking bound
+ * is intentional.
+ *
+ * Return: 0 on success, negative errno on CSR access failure.
+ */
+int hinic_set_ip_check(void *hwdev, bool ip_check_ctl)
+{
+ u32 val = 0;
+ int ret;
+ int i;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ for (i = 0; i <= HINIC_IPSU_CHANNEL_NUM; i++) {
+ ret = hinic_api_csr_rd32(hwdev, HINIC_NODE_ID_IPSU,
+ (HINIC_IPSU_CHANNEL0_ADDR +
+ i * HINIC_IPSU_CHANNEL_OFFSET), &val);
+ if (ret)
+ return ret;
+
+ val = be32_to_cpu(val);
+ if (ip_check_ctl)
+ val |= HINIC_IPSU_DIP_SIP_MASK;
+ else
+ val &= (~HINIC_IPSU_DIP_SIP_MASK);
+
+ val = cpu_to_be32(val);
+ ret = hinic_api_csr_wr32(hwdev, HINIC_NODE_ID_IPSU,
+ (HINIC_IPSU_CHANNEL0_ADDR +
+ i * HINIC_IPSU_CHANNEL_OFFSET), val);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * hinic_get_card_present_state - probe whether the card is still present
+ * @hwdev: hw device
+ * @card_present_state: output, true if the card responds on PCIe
+ *
+ * An all-ones read of ATTR1 means the PCIe link is down (device gone).
+ *
+ * Return: 0 on success, -EINVAL on bad input.
+ */
+int hinic_get_card_present_state(void *hwdev, bool *card_present_state)
+{
+ u32 addr, attr1;
+
+ if (!hwdev || !card_present_state)
+ return -EINVAL;
+
+ addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+ attr1 = hinic_hwif_read_reg(((struct hinic_hwdev *)hwdev)->hwif, addr);
+ if (attr1 == HINIC_PCIE_LINK_DOWN) {
+ sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, "Card is not present\n");
+ *card_present_state = false;
+ } else {
+ *card_present_state = true;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_card_present_state);
+
+/* Drop the PF status back to INIT so the mgmt CPU stops pushing
+ * asynchronous event reports to this function.
+ */
+void hinic_disable_mgmt_msg_report(void *hwdev)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+
+ hinic_set_pf_status(hw_dev->hwif, HINIC_PF_STATUS_INIT);
+}
+
+/**
+ * hinic_set_vxlan_udp_dport - program the VxLAN UDP destination port
+ * @hwdev: hw device
+ * @udp_port: new port in host byte order
+ *
+ * Reads the current (big-endian) value, skips the write when unchanged.
+ * VFs are a silent no-op. NOTE(review): the nic_info format string lacks
+ * a trailing '\n' — likely an oversight, but message text is left as-is.
+ *
+ * Return: 0 on success, negative errno on CSR access failure.
+ */
+int hinic_set_vxlan_udp_dport(void *hwdev, u32 udp_port)
+{
+ u32 val = 0;
+ int ret;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ ret = hinic_api_csr_rd32(hwdev, HINIC_NODE_ID_IPSU,
+ HINIC_IPSURX_VXLAN_DPORT_ADDR, &val);
+ if (ret)
+ return ret;
+
+ nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Update VxLAN UDP dest port: cur port:%u, new port:%u",
+ be32_to_cpu(val), udp_port);
+
+ if (be32_to_cpu(val) == udp_port)
+ return 0;
+
+ udp_port = cpu_to_be32(udp_port);
+ ret = hinic_api_csr_wr32(hwdev, HINIC_NODE_ID_IPSU,
+ HINIC_IPSURX_VXLAN_DPORT_ADDR, udp_port);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
new file mode 100644
index 000000000000..22a95ea184df
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_HWDEV_H_
+#define HINIC_HWDEV_H_
+
+#include "hinic_port_cmd.h"
+
+/* to use 0-level CLA, page size must be: 64B(wqebb) * 4096(max_q_depth) */
+#define HINIC_DEFAULT_WQ_PAGE_SIZE 0x40000
+#define HINIC_HW_WQ_PAGE_SIZE 0x1000
+
+#define HINIC_MSG_TO_MGMT_MAX_LEN 2016
+
+/* Status codes returned by the mgmt CPU in the message header */
+#define HINIC_MGMT_STATUS_ERR_OK 0 /* Ok */
+#define HINIC_MGMT_STATUS_ERR_PARAM 1 /* Invalid parameter */
+#define HINIC_MGMT_STATUS_ERR_FAILED 2 /* Operation failed */
+#define HINIC_MGMT_STATUS_ERR_PORT 3 /* Invalid port */
+#define HINIC_MGMT_STATUS_ERR_TIMEOUT 4 /* Operation time out */
+#define HINIC_MGMT_STATUS_ERR_NOMATCH 5 /* Version not match */
+#define HINIC_MGMT_STATUS_ERR_EXIST 6 /* Entry exists */
+#define HINIC_MGMT_STATUS_ERR_NOMEM 7 /* Out of memory */
+#define HINIC_MGMT_STATUS_ERR_INIT 8 /* Feature not initialized */
+#define HINIC_MGMT_STATUS_ERR_FAULT 9 /* Invalid address */
+#define HINIC_MGMT_STATUS_ERR_PERM 10 /* Operation not permitted */
+#define HINIC_MGMT_STATUS_ERR_EMPTY 11 /* Table empty */
+#define HINIC_MGMT_STATUS_ERR_FULL 12 /* Table full */
+#define HINIC_MGMT_STATUS_ERR_NOT_FOUND 13 /* Not found */
+#define HINIC_MGMT_STATUS_ERR_BUSY 14 /* Device or resource busy */
+#define HINIC_MGMT_STATUS_ERR_RESOURCE 15 /* No resources for operation */
+#define HINIC_MGMT_STATUS_ERR_CONFIG 16 /* Invalid configuration */
+#define HINIC_MGMT_STATUS_ERR_UNAVAIL 17 /* Feature unavailable */
+#define HINIC_MGMT_STATUS_ERR_CRC 18 /* CRC check failed */
+#define HINIC_MGMT_STATUS_ERR_NXIO 19 /* No such device or address */
+#define HINIC_MGMT_STATUS_ERR_ROLLBACK 20 /* Chip rollback fail */
+#define HINIC_MGMT_STATUS_ERR_LEN 32 /* Length too short or too long */
+#define HINIC_MGMT_STATUS_ERR_UNSUPPORT 0xFF /* Feature not supported */
+
+#define HINIC_CHIP_PRESENT 1
+#define HINIC_CHIP_ABSENT 0
+
+/* Forward declarations: these are only referenced by pointer below */
+struct cfg_mgmt_info;
+struct rdma_comp_resource;
+
+struct hinic_hwif;
+struct hinic_nic_io;
+struct hinic_wqs;
+struct hinic_aeqs;
+struct hinic_ceqs;
+struct hinic_mbox_func_to_func;
+struct hinic_msg_pf_to_mgmt;
+struct hinic_cmdqs;
+struct hinic_multi_host_mgmt;
+
+/* Wire format for the root-context configuration mgmt command; leading
+ * status/version/rsvd0 is the common mgmt message header.
+ */
+struct hinic_root_ctxt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u16 rsvd1;
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+ u8 lro_en;
+ u8 rsvd2;
+ u8 ppf_idx;
+ u8 rsvd3;
+ u16 rq_depth;
+ u16 rx_buf_sz;
+ u16 sq_depth;
+};
+
+/* One DMA-coherent page: kernel virtual address plus bus address */
+struct hinic_page_addr {
+ void *virt_addr;
+ u64 phys_addr;
+};
+
+/* MQM address-translation table bookkeeping (array of search pages) */
+struct mqm_addr_trans_tbl_info {
+ u32 chunk_num;
+ u32 search_gpa_num;
+ u32 page_size;
+ u32 page_num;
+ struct hinic_page_addr *brm_srch_page_addr;
+};
+
+/* All-ones register read: the PCIe link is down / device removed */
+#define HINIC_PCIE_LINK_DOWN 0xFFFFFFFF
+
+#define HINIC_DEV_ACTIVE_FW_TIMEOUT (35 * 1000)
+#define HINIC_DEV_BUSY_ACTIVE_FW 0xFE
+
+#define HINIC_HW_WQ_NAME "hinic_hardware"
+/* Heartbeat poll period and first-poll delay, in milliseconds */
+#define HINIC_HEARTBEAT_PERIOD 1000
+#define HINIC_HEARTBEAT_START_EXPIRE 5000
+
+#define HINIC_CHIP_ERROR_TYPE_MAX 1024
+#define HINIC_CHIP_FAULT_SIZE \
+ (HINIC_NODE_ID_MAX * FAULT_LEVEL_MAX * HINIC_CHIP_ERROR_TYPE_MAX)
+
+/* Destination node ids for the API chain (hinic_api_csr_rd32/wr32) */
+enum hinic_node_id {
+ HINIC_NODE_ID_CPI = 0,
+ HINIC_NODE_ID_IPSU = 4,
+ HINIC_NODE_ID_MGMT_HOST = 21, /* Host CPU send API to uP */
+ HINIC_NODE_ID_MAX = 22
+};
+
+#define HINIC_HWDEV_INIT_MODES_MASK ((1UL << HINIC_HWDEV_ALL_INITED) - 1)
+
+/* Bits in hinic_hwdev.func_state beyond the per-module init bits */
+enum hinic_hwdev_func_state {
+ HINIC_HWDEV_FUNC_INITED = HINIC_HWDEV_ALL_INITED,
+
+ HINIC_HWDEV_FUNC_DEINIT,
+
+ HINIC_HWDEV_STATE_BUSY = 31,
+};
+
+/* CQM (queue management) operation counters, exposed via debug paths */
+struct hinic_cqm_stats {
+ atomic_t cqm_cmd_alloc_cnt;
+ atomic_t cqm_cmd_free_cnt;
+ atomic_t cqm_send_cmd_box_cnt;
+ atomic_t cqm_send_cmd_imm_cnt;
+ atomic_t cqm_db_addr_alloc_cnt;
+ atomic_t cqm_db_addr_free_cnt;
+
+ atomic_t cqm_fc_srq_create_cnt;
+ atomic_t cqm_srq_create_cnt;
+ atomic_t cqm_rq_create_cnt;
+
+ atomic_t cqm_qpc_mpt_create_cnt;
+ atomic_t cqm_nonrdma_queue_create_cnt;
+ atomic_t cqm_rdma_queue_create_cnt;
+ atomic_t cqm_rdma_table_create_cnt;
+
+ atomic_t cqm_qpc_mpt_delete_cnt;
+ atomic_t cqm_nonrdma_queue_delete_cnt;
+ atomic_t cqm_rdma_queue_delete_cnt;
+ atomic_t cqm_rdma_table_delete_cnt;
+
+ atomic_t cqm_func_timer_clear_cnt;
+ atomic_t cqm_func_hash_buf_clear_cnt;
+
+ atomic_t cqm_scq_callback_cnt;
+ atomic_t cqm_ecq_callback_cnt;
+ atomic_t cqm_nocq_callback_cnt;
+ atomic_t cqm_aeq_callback_cnt[112];
+};
+
+/* Link up/down transition counters */
+struct hinic_link_event_stats {
+ atomic_t link_down_stats;
+ atomic_t link_up_stats;
+};
+
+/* Fault counters, bucketed by source node and severity */
+struct hinic_fault_event_stats {
+ atomic_t chip_fault_stats[HINIC_NODE_ID_MAX][FAULT_LEVEL_MAX];
+ atomic_t fault_type_stat[FAULT_TYPE_MAX];
+ atomic_t pcie_fault_stats;
+};
+
+/* Aggregate per-device hardware statistics */
+struct hinic_hw_stats {
+ atomic_t heart_lost_stats;
+ atomic_t nic_ucode_event_stats[HINIC_NIC_FATAL_ERROR_MAX];
+ struct hinic_cqm_stats cqm_stats;
+ struct hinic_link_event_stats link_event_stats;
+ struct hinic_fault_event_stats fault_event_stats;
+};
+
+/* Pre-combined feature_cap bitmasks for each multi-host role */
+#define HINIC_NORMAL_HOST_CAP (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \
+ HINIC_FUNC_SUPP_RATE_LIMIT | \
+ HINIC_FUNC_SUPP_DFX_REG | \
+ HINIC_FUNC_SUPP_RX_MODE | \
+ HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \
+ HINIC_FUNC_SUPP_CHANGE_MAC | \
+ HINIC_FUNC_SUPP_ENCAP_TSO_CSUM)
+#define HINIC_MULTI_BM_MASTER (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \
+ HINIC_FUNC_SUPP_DFX_REG | \
+ HINIC_FUNC_SUPP_RX_MODE | \
+ HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \
+ HINIC_FUNC_SUPP_CHANGE_MAC)
+#define HINIC_MULTI_BM_SLAVE (HINIC_FUNC_SRIOV_EN_DFLT | \
+ HINIC_FUNC_SRIOV_NUM_FIX | \
+ HINIC_FUNC_FORCE_LINK_UP | \
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP)
+#define HINIC_MULTI_VM_MASTER (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \
+ HINIC_FUNC_SUPP_DFX_REG | \
+ HINIC_FUNC_SUPP_RX_MODE | \
+ HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \
+ HINIC_FUNC_SUPP_CHANGE_MAC)
+#define HINIC_MULTI_VM_SLAVE (HINIC_FUNC_MGMT | \
+ HINIC_FUNC_SUPP_DFX_REG | \
+ HINIC_FUNC_SRIOV_EN_DFLT | \
+ HINIC_FUNC_SUPP_RX_MODE | \
+ HINIC_FUNC_SUPP_CHANGE_MAC | \
+ HINIC_FUNC_OFFLOAD_OVS_UNSUPP)
+
+/* Bit-field layout of the multi-host control register */
+#define MULTI_HOST_CHIP_MODE_SHIFT 0
+#define MULTI_HOST_MASTER_MBX_STS_SHIFT 0x4
+#define MULTI_HOST_PRIV_DATA_SHIFT 0x8
+
+#define MULTI_HOST_CHIP_MODE_MASK 0xF
+#define MULTI_HOST_MASTER_MBX_STS_MASK 0xF
+#define MULTI_HOST_PRIV_DATA_MASK 0xFFFF
+
+#define MULTI_HOST_REG_SET(val, member) \
+ (((val) & MULTI_HOST_##member##_MASK) \
+ << MULTI_HOST_##member##_SHIFT)
+#define MULTI_HOST_REG_GET(val, member) \
+ (((val) >> MULTI_HOST_##member##_SHIFT) \
+ & MULTI_HOST_##member##_MASK)
+#define MULTI_HOST_REG_CLEAR(val, member) \
+ ((val) & (~(MULTI_HOST_##member##_MASK \
+ << MULTI_HOST_##member##_SHIFT)))
+
+#define HINIC_BOARD_TYPE_MULTI_HOST_ETH_25GE 12
+
+/* new version of roce qp not limited by power of 2 */
+#define HINIC_CMD_VER_ROCE_QP 1
+/* new version for add function id in multi-host */
+#define HINIC_CMD_VER_FUNC_ID 2
+
+/* Per-function device context: ties together hwif, queues, mailboxes,
+ * mgmt channels, heartbeat machinery and statistics.
+ */
+struct hinic_hwdev {
+ void *adapter_hdl; /* pointer to hinic_pcidev or NDIS_Adapter */
+ void *pcidev_hdl; /* pointer to pcidev or Handler */
+ void *dev_hdl; /* pointer to pcidev->dev or Handler, for
+ * sdk_err() or dma_alloc()
+ */
+ u32 wq_page_size;
+
+ void *cqm_hdl;
+ void *chip_node;
+
+ struct hinic_hwif *hwif; /* include void __iomem *bar */
+ struct hinic_nic_io *nic_io;
+ struct cfg_mgmt_info *cfg_mgmt;
+ struct rdma_comp_resource *rdma_comp_res;
+ struct hinic_wqs *wqs; /* for FC slq */
+ struct mqm_addr_trans_tbl_info mqm_att;
+
+ struct hinic_aeqs *aeqs;
+ struct hinic_ceqs *ceqs;
+
+ struct hinic_mbox_func_to_func *func_to_func;
+
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt;
+
+ struct hinic_cmdqs *cmdqs;
+
+ struct hinic_page_addr page_pa0;
+ struct hinic_page_addr page_pa1;
+
+ /* upper-layer event callback, see hinic_event_register() */
+ hinic_event_handler event_callback;
+ void *event_pri_handle;
+
+ struct work_struct timer_work;
+ struct workqueue_struct *workq;
+ struct timer_list heartbeat_timer;
+ /* true represent heartbeat lost, false represent heartbeat restore */
+ u32 heartbeat_lost;
+ int chip_present_flag;
+ struct hinic_hw_stats hw_stats;
+ u8 *chip_fault_stats;
+
+ /* NOTE(review): "statufull" is a typo for "stateful"; renaming
+ * requires touching all users, so it is kept as-is here.
+ */
+ u32 statufull_ref_cnt;
+ ulong func_state;
+
+ u64 feature_cap; /* enum hinic_func_cap */
+ enum hinic_func_mode func_mode;
+
+ struct hinic_multi_host_mgmt *mhost_mgmt;
+
+ /* In bmgw x86 host, driver can't send message to mgmt cpu directly,
+ * need to transmit message ppf mbox to bmgw arm host.
+ */
+ struct semaphore ppf_sem;
+ void *ppf_hwdev;
+
+ struct semaphore func_sem;
+ int func_ref;
+ struct hinic_board_info board_info;
+#define MGMT_VERSION_MAX_LEN 32
+ u8 mgmt_ver[MGMT_VERSION_MAX_LEN];
+ u64 fw_support_func_flag;
+};
+
+/* Communication-channel (mgmt/mbox/cmdq) bring-up and teardown */
+int hinic_init_comm_ch(struct hinic_hwdev *hwdev);
+
+void hinic_uninit_comm_ch(struct hinic_hwdev *hwdev);
+
+int hinic_ppf_ext_db_init(void *dev);
+
+int hinic_ppf_ext_db_deinit(void *dev);
+
+/* Queue types for the "arm bit" (interrupt re-arm) command */
+enum hinic_set_arm_type {
+ HINIC_SET_ARM_CMDQ,
+ HINIC_SET_ARM_SQ,
+ HINIC_SET_ARM_TYPE_NUM,
+};
+
+int hinic_set_arm_bit(void *hwdev, enum hinic_set_arm_type q_type, u16 q_id);
+
+void hinic_set_chip_present(void *hwdev);
+void hinic_force_complete_all(void *hwdev);
+
+void hinic_init_heartbeat(struct hinic_hwdev *hwdev);
+void hinic_destroy_heartbeat(struct hinic_hwdev *hwdev);
+
+u8 hinic_nic_sw_aeqe_handler(void *handle, u8 event, u64 data);
+
+int hinic_enable_fast_recycle(void *hwdev, bool enable);
+int hinic_l2nic_reset_base(struct hinic_hwdev *hwdev, u16 reset_flag);
+
+/* Resource classes addressed by the L2NIC reset command */
+enum l2nic_resource_type {
+ RES_TYPE_NIC_FUNC = 0,
+ RES_TYPE_FLUSH_BIT,
+ RES_TYPE_PF_BW_CFG,
+ RES_TYPE_MQM,
+ RES_TYPE_SMF,
+ RES_TYPE_CMDQ_ROOTCTX,
+ RES_TYPE_SQ_CI_TABLE,
+ RES_TYPE_CEQ,
+ RES_TYPE_MBOX,
+ RES_TYPE_AEQ,
+};
+
+void hinic_notify_dcb_state_event(struct hinic_hwdev *hwdev,
+ struct hinic_dcb_state *dcb_state);
+
+int hinic_pf_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size, u32 timeout);
+
+int hinic_pf_send_clp_cmd(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+int hinic_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit);
+
+bool hinic_mgmt_event_ack_first(u8 mod, u8 cmd);
+
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+
+int hinic_phy_init_status_judge(void *hwdev);
+
+int hinic_hilink_info_show(struct hinic_hwdev *hwdev);
+/* Indirect CSR access through the API chain (see enum hinic_node_id) */
+int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val);
+int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val);
+
+int hinic_ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+/* SDI (smart device interface) operating modes */
+#define HINIC_SDI_MODE_UNKNOWN 0
+#define HINIC_SDI_MODE_BM 1
+#define HINIC_SDI_MODE_VM 2
+#define HINIC_SDI_MODE_MAX 3
+int hinic_get_sdi_mode(struct hinic_hwdev *hwdev, u16 *cur_mode);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwif.c b/drivers/net/ethernet/huawei/hinic/hinic_hwif.c
new file mode 100644
index 000000000000..b894235b6edb
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwif.c
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/io-mapping.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+
+#include "hinic_csr.h"
+#include "hinic_hwif.h"
+#include "hinic_eqs.h"
+
+#define WAIT_HWIF_READY_TIMEOUT 10000
+
+#define HINIC_SELFTEST_RESULT 0x883C
+
+/* For UEFI driver, this function can only read BAR0 */
+u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg)
+{
+	/* CSR space is big-endian; convert to host byte order */
+	return be32_to_cpu(readl(hwif->cfg_regs_base + reg));
+}
+
+/* For UEFI driver, this function can only write BAR0 */
+void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val)
+{
+	/* Convert host byte order to the device's big-endian CSR layout */
+	writel(cpu_to_be32(val), hwif->cfg_regs_base + reg);
+}
+
+/**
+ * hwif_ready - test if the HW initialization passed
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ */
+static int hwif_ready(struct hinic_hwdev *hwdev)
+{
+	u32 addr, attr1;
+
+	addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+	attr1 = hinic_hwif_read_reg(hwdev->hwif, addr);
+
+	/* An all-ones readback means the PCIe link is down */
+	if (attr1 == HINIC_PCIE_LINK_DOWN)
+		return -EBUSY;
+
+	/* Management CPU must have finished its own initialization */
+	if (!HINIC_AF1_GET(attr1, MGMT_INIT_STATUS))
+		return -EBUSY;
+
+	/* A VF is only usable after its parent PF completed init */
+	if (HINIC_IS_VF(hwdev)) {
+		if (!HINIC_AF1_GET(attr1, PF_INIT_STATUS))
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+/**
+ * wait_hwif_ready - poll hwif_ready() roughly once per millisecond
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, -EBUSY - hardware never became ready
+ *
+ * Polls up to WAIT_HWIF_READY_TIMEOUT (+1) iterations, i.e. about 10s.
+ */
+static int wait_hwif_ready(struct hinic_hwdev *hwdev)
+{
+	ulong timeout = 0;
+
+	do {
+		if (!hwif_ready(hwdev))
+			return 0;
+
+		usleep_range(999, 1000);
+		timeout++;
+	} while (timeout <= WAIT_HWIF_READY_TIMEOUT);
+
+	sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n");
+	return -EBUSY;
+}
+
+/**
+ * set_hwif_attr - set the attributes as members in hwif
+ * @hwif: the hardware interface of a pci function device
+ * @attr0: the first attribute that was read from the hw
+ * @attr1: the second attribute that was read from the hw
+ * @attr2: the third attribute that was read from the hw
+ */
+static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
+			  u32 attr2)
+{
+	hwif->attr.func_global_idx = HINIC_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+	hwif->attr.port_to_port_idx = HINIC_AF0_GET(attr0, P2P_IDX);
+	hwif->attr.pci_intf_idx = HINIC_AF0_GET(attr0, PCI_INTF_IDX);
+	hwif->attr.vf_in_pf = HINIC_AF0_GET(attr0, VF_IN_PF);
+	hwif->attr.func_type = HINIC_AF0_GET(attr0, FUNC_TYPE);
+
+	hwif->attr.ppf_idx = HINIC_AF1_GET(attr1, PPF_IDX);
+
+	/* The counts are encoded in attr1 as log2; expand with BIT() */
+	hwif->attr.num_aeqs = BIT(HINIC_AF1_GET(attr1, AEQS_PER_FUNC));
+	hwif->attr.num_ceqs = BIT(HINIC_AF1_GET(attr1, CEQS_PER_FUNC));
+	hwif->attr.num_irqs = BIT(HINIC_AF1_GET(attr1, IRQS_PER_FUNC));
+	hwif->attr.num_dma_attr = BIT(HINIC_AF1_GET(attr1, DMA_ATTR_PER_FUNC));
+
+	hwif->attr.global_vf_id_of_pf = HINIC_AF2_GET(attr2,
+						      GLOBAL_VF_ID_OF_PF);
+}
+
+/**
+ * get_hwif_attr - read and set the attributes as members in hwif
+ * @hwif: the hardware interface of a pci function device
+ *
+ * Reads function attribute CSRs 0..2 and decodes them into hwif->attr.
+ */
+static void get_hwif_attr(struct hinic_hwif *hwif)
+{
+	u32 addr, attr0, attr1, attr2;
+
+	addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+	attr0 = hinic_hwif_read_reg(hwif, addr);
+
+	addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+	attr1 = hinic_hwif_read_reg(hwif, addr);
+
+	addr = HINIC_CSR_FUNC_ATTR2_ADDR;
+	attr2 = hinic_hwif_read_reg(hwif, addr);
+
+	set_hwif_attr(hwif, attr0, attr1, attr2);
+}
+
+/* Publish the PF driver state in attr5 so the mgmt CPU can see it.
+ * VFs have no attr5 of their own; the call is a no-op for them.
+ */
+void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status)
+{
+	u32 attr5 = HINIC_AF5_SET(status, PF_STATUS);
+	u32 addr = HINIC_CSR_FUNC_ATTR5_ADDR;
+
+	if (hwif->attr.func_type == TYPE_VF)
+		return;
+
+	hinic_hwif_write_reg(hwif, addr, attr5);
+}
+
+/* Read back the PF status previously published in attr5 */
+enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif)
+{
+	u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
+
+	return HINIC_AF5_GET(attr5, PF_STATUS);
+}
+
+/* Current doorbell gate state (enabled/disabled) from attr4 */
+enum hinic_doorbell_ctrl hinic_get_doorbell_ctrl_status(struct hinic_hwif *hwif)
+{
+	u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
+
+	return HINIC_AF4_GET(attr4, DOORBELL_CTRL);
+}
+
+/* Current outbound (device->host DMA) gate state from attr4 */
+enum hinic_outbound_ctrl hinic_get_outbound_ctrl_status(struct hinic_hwif *hwif)
+{
+	u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
+
+	return HINIC_AF4_GET(attr4, OUTBOUND_CTRL);
+}
+
+/* Open the doorbell gate: read-modify-write the DOORBELL_CTRL field of attr4 */
+void hinic_enable_doorbell(struct hinic_hwif *hwif)
+{
+	u32 addr, attr4;
+
+	addr = HINIC_CSR_FUNC_ATTR4_ADDR;
+	attr4 = hinic_hwif_read_reg(hwif, addr);
+
+	attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL);
+	attr4 |= HINIC_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL);
+
+	hinic_hwif_write_reg(hwif, addr, attr4);
+}
+
+/* Close the doorbell gate so the chip ignores further doorbell writes */
+void hinic_disable_doorbell(struct hinic_hwif *hwif)
+{
+	u32 addr, attr4;
+
+	addr = HINIC_CSR_FUNC_ATTR4_ADDR;
+	attr4 = hinic_hwif_read_reg(hwif, addr);
+
+	attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL);
+	attr4 |= HINIC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL);
+
+	hinic_hwif_write_reg(hwif, addr, attr4);
+}
+
+/* Open the outbound gate: read-modify-write the OUTBOUND_CTRL field of attr4 */
+void hinic_enable_outbound(struct hinic_hwif *hwif)
+{
+	u32 addr, attr4;
+
+	addr = HINIC_CSR_FUNC_ATTR4_ADDR;
+	attr4 = hinic_hwif_read_reg(hwif, addr);
+
+	attr4 = HINIC_AF4_CLEAR(attr4, OUTBOUND_CTRL);
+	attr4 |= HINIC_AF4_SET(ENABLE_OUTBOUND, OUTBOUND_CTRL);
+
+	hinic_hwif_write_reg(hwif, addr, attr4);
+}
+
+/* Close the outbound gate */
+void hinic_disable_outbound(struct hinic_hwif *hwif)
+{
+	u32 addr, attr4;
+
+	addr = HINIC_CSR_FUNC_ATTR4_ADDR;
+	attr4 = hinic_hwif_read_reg(hwif, addr);
+
+	attr4 = HINIC_AF4_CLEAR(attr4, OUTBOUND_CTRL);
+	attr4 |= HINIC_AF4_SET(DISABLE_OUTBOUND, OUTBOUND_CTRL);
+
+	hinic_hwif_write_reg(hwif, addr, attr4);
+}
+
+/**
+ * set_ppf - try to set hwif as ppf and set the type of hwif in this case
+ * @hwif: the hardware interface of a pci function device
+ *
+ * Writes our global function index into the PPF election register, then
+ * reads it back: the function whose index the hardware kept wins the
+ * election and becomes the PPF.
+ */
+static void set_ppf(struct hinic_hwif *hwif)
+{
+	struct hinic_func_attr *attr = &hwif->attr;
+	u32 addr, val, ppf_election;
+
+	/* Read Modify Write */
+	addr = HINIC_CSR_PPF_ELECTION_ADDR;
+
+	val = hinic_hwif_read_reg(hwif, addr);
+	val = HINIC_PPF_ELECTION_CLEAR(val, IDX);
+
+	ppf_election = HINIC_PPF_ELECTION_SET(attr->func_global_idx, IDX);
+	val |= ppf_election;
+
+	hinic_hwif_write_reg(hwif, addr, val);
+
+	/* Check PPF */
+	val = hinic_hwif_read_reg(hwif, addr);
+
+	attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX);
+	if (attr->ppf_idx == attr->func_global_idx)
+		attr->func_type = TYPE_PPF;
+}
+
+/**
+ * get_mpf - get the mpf index into the hwif
+ * @hwif: the hardware interface of a pci function device
+ */
+static void get_mpf(struct hinic_hwif *hwif)
+{
+	struct hinic_func_attr *attr = &hwif->attr;
+	u32 mpf_election, addr;
+
+	addr = HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR;
+
+	mpf_election = hinic_hwif_read_reg(hwif, addr);
+	attr->mpf_idx = HINIC_MPF_ELECTION_GET(mpf_election, IDX);
+}
+
+/**
+ * set_mpf - try to set hwif as mpf and set the mpf idx in hwif
+ * @hwif: the hardware interface of a pci function device
+ *
+ * Unlike set_ppf(), the result is not read back here; callers use
+ * get_mpf() afterwards to learn the winning index.
+ */
+static void set_mpf(struct hinic_hwif *hwif)
+{
+	struct hinic_func_attr *attr = &hwif->attr;
+	u32 addr, val, mpf_election;
+
+	/* Read Modify Write */
+	addr = HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR;
+
+	val = hinic_hwif_read_reg(hwif, addr);
+
+	val = HINIC_MPF_ELECTION_CLEAR(val, IDX);
+	mpf_election = HINIC_MPF_ELECTION_SET(attr->func_global_idx, IDX);
+
+	val |= mpf_election;
+	hinic_hwif_write_reg(hwif, addr, val);
+}
+
+/* Fill the doorbell-page free pool: one slot per HINIC_DB_PAGE_SIZE page
+ * of the doorbell BAR, each slot initially holding its own index.
+ */
+static void init_db_area_idx(struct hinic_hwif *hwif)
+{
+	struct hinic_free_db_area *free_db_area;
+	u32 db_max_areas;
+	u32 i;
+
+	free_db_area = &hwif->free_db_area;
+	db_max_areas = hwif->db_size / HINIC_DB_PAGE_SIZE;
+
+	for (i = 0; i < db_max_areas; i++)
+		free_db_area->db_idx[i] = i;
+
+	free_db_area->num_free = db_max_areas;
+
+	spin_lock_init(&free_db_area->idx_lock);
+}
+
+/* Allocate a doorbell page index from the ring-style free pool.
+ * Return: 0 and *idx on success, -ENOMEM if the pool is exhausted.
+ *
+ * NOTE(review): the "pos &= db_max_areas - 1" wrap assumes db_max_areas
+ * is a power of two — confirm db_size/HINIC_DB_PAGE_SIZE guarantees that.
+ * A slot containing an out-of-range index is treated as corrupted: it is
+ * poisoned and skipped (num_free stays decremented), so such slots are
+ * permanently lost from the pool.
+ */
+static int get_db_idx(struct hinic_hwif *hwif, u32 *idx)
+{
+	struct hinic_free_db_area *free_db_area = &hwif->free_db_area;
+	u32 db_max_areas = hwif->db_size / HINIC_DB_PAGE_SIZE;
+	u32 pos;
+	u32 pg_idx;
+
+	spin_lock(&free_db_area->idx_lock);
+
+retry:
+	if (free_db_area->num_free == 0) {
+		spin_unlock(&free_db_area->idx_lock);
+		return -ENOMEM;
+	}
+
+	free_db_area->num_free--;
+
+	pos = free_db_area->alloc_pos++;
+	pos &= db_max_areas - 1;
+
+	pg_idx = free_db_area->db_idx[pos];
+
+	/* Poison the consumed slot so a double-read is detectable */
+	free_db_area->db_idx[pos] = 0xFFFFFFFF;
+
+	/* pg_idx out of range */
+	if (pg_idx >= db_max_areas)
+		goto retry;
+
+	spin_unlock(&free_db_area->idx_lock);
+
+	*idx = pg_idx;
+
+	return 0;
+}
+
+/* Return a doorbell page index to the pool; silently ignores an
+ * out-of-range idx (defensive against callers passing garbage).
+ */
+static void free_db_idx(struct hinic_hwif *hwif, u32 idx)
+{
+	struct hinic_free_db_area *free_db_area = &hwif->free_db_area;
+	u32 db_max_areas = hwif->db_size / HINIC_DB_PAGE_SIZE;
+	u32 pos;
+
+	if (idx >= db_max_areas)
+		return;
+
+	spin_lock(&free_db_area->idx_lock);
+
+	pos = free_db_area->return_pos++;
+	pos &= db_max_areas - 1;
+
+	free_db_area->db_idx[pos] = idx;
+
+	free_db_area->num_free++;
+
+	spin_unlock(&free_db_area->idx_lock);
+}
+
+/**
+ * hinic_free_db_addr - release a doorbell page obtained from
+ * hinic_alloc_db_addr()
+ * @hwdev: the pointer to hw device
+ * @db_base: doorbell virtual address to release
+ * @dwqe_base: direct-WQE mapping to unmap (may be NULL)
+ */
+void hinic_free_db_addr(void *hwdev, void __iomem *db_base,
+			void __iomem *dwqe_base)
+{
+	struct hinic_hwif *hwif;
+	u32 idx;
+
+	if (!hwdev || !db_base)
+		return;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+	idx = DB_IDX(db_base, hwif->db_base);
+
+#if defined(__aarch64__)
+	/* No need to unmap */
+#else
+	/* Only normal mode maps dwqe through an io_mapping (see alloc) */
+	if (dwqe_base && hwif->chip_mode == CHIP_MODE_NORMAL)
+		io_mapping_unmap(dwqe_base);
+#endif
+
+	free_db_idx(hwif, idx);
+}
+EXPORT_SYMBOL(hinic_free_db_addr);
+
+/**
+ * hinic_alloc_db_addr - allocate one doorbell page and, optionally, its
+ * direct-WQE counterpart
+ * @hwdev: the pointer to hw device
+ * @db_base: output, doorbell virtual address
+ * @dwqe_base: output, direct-WQE address; skipped if NULL or not normal mode
+ * Return: 0 - success, negative - failure
+ *
+ * NOTE(review): the dwqe offset uses PAGE_SHIFT while the db offset uses
+ * HINIC_DB_PAGE_SIZE — this is only consistent if HINIC_DB_PAGE_SIZE ==
+ * PAGE_SIZE; confirm for 64K-page kernels.
+ */
+int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base,
+			void __iomem **dwqe_base)
+{
+	struct hinic_hwif *hwif;
+	u64 offset;
+	u32 idx;
+	int err;
+
+	if (!hwdev || !db_base)
+		return -EINVAL;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	err = get_db_idx(hwif, &idx);
+	if (err)
+		return -EFAULT;
+
+	*db_base = hwif->db_base + idx * HINIC_DB_PAGE_SIZE;
+
+	if (!dwqe_base || hwif->chip_mode != CHIP_MODE_NORMAL)
+		return 0;
+
+	offset = ((u64)idx) << PAGE_SHIFT;
+
+#if defined(__aarch64__)
+	/* aarch64: dwqe_mapping is a plain ioremap'ed region */
+	*dwqe_base = hwif->dwqe_mapping + offset;
+#else
+	*dwqe_base = io_mapping_map_wc(hwif->dwqe_mapping, offset,
+				       HINIC_DB_PAGE_SIZE);
+#endif
+
+	if (!(*dwqe_base)) {
+		hinic_free_db_addr(hwdev, *db_base, NULL);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_alloc_db_addr);
+
+/**
+ * hinic_free_db_phy_addr - release a physical doorbell page obtained from
+ * hinic_alloc_db_phy_addr()
+ * @hwdev: the pointer to hw device
+ * @db_base: physical doorbell address to release
+ * @dwqe_base: unused; kept for symmetry with the virtual-address API
+ */
+void hinic_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base)
+{
+	struct hinic_hwif *hwif;
+	u32 idx;
+
+	if (!hwdev)
+		return;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+	idx = DB_IDX(db_base, hwif->db_base_phy);
+
+	free_db_idx(hwif, idx);
+}
+EXPORT_SYMBOL(hinic_free_db_phy_addr);
+
+/**
+ * hinic_alloc_db_phy_addr - allocate a doorbell page as physical addresses
+ * (for callers that map it themselves, e.g. user space)
+ * @hwdev: the pointer to hw device
+ * @db_base: output, physical doorbell address
+ * @dwqe_base: output, physical direct-WQE address (normal mode only)
+ * Return: 0 - success, negative - failure
+ */
+int hinic_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base)
+{
+	struct hinic_hwif *hwif;
+	u32 idx;
+	int err;
+
+	if (!hwdev || !db_base || !dwqe_base)
+		return -EINVAL;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	err = get_db_idx(hwif, &idx);
+	if (err)
+		return -EFAULT;
+
+	*db_base = hwif->db_base_phy + idx * HINIC_DB_PAGE_SIZE;
+
+	/* dwqe region sits HINIC_DB_DWQE_SIZE above the db region */
+	if (hwif->chip_mode == CHIP_MODE_NORMAL)
+		*dwqe_base = *db_base + HINIC_DB_DWQE_SIZE;
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_alloc_db_phy_addr);
+
+/* Read the per-vector MSI-X mask bit from the interrupt BAR.
+ * NOTE(review): returning !!(mask bit) as the enum assumes
+ * HINIC_MSIX_ENABLE == 0 and HINIC_MSIX_DISABLE == 1 — confirm in
+ * the enum definition.
+ */
+enum hinic_msix_state hinic_get_msix_state(void *hwdev, u16 msix_idx)
+{
+	struct hinic_hwif *hwif = NULL;
+	u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE +
+		     HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL;
+	u32 mask_bits;
+
+	if (!hwdev)
+		return HINIC_MSIX_DISABLE;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	mask_bits = readl(hwif->intr_regs_base + offset);
+
+	return !!(mask_bits & HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT);
+}
+
+/* Mask (HINIC_MSIX_DISABLE) or unmask (HINIC_MSIX_ENABLE) one MSI-X
+ * vector by rewriting its vector-control word.
+ */
+void hinic_set_msix_state(void *hwdev, u16 msix_idx, enum hinic_msix_state flag)
+{
+	struct hinic_hwif *hwif;
+	u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE +
+		     HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL;
+	u32 mask_bits;
+
+	if (!hwdev)
+		return;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	mask_bits = readl(hwif->intr_regs_base + offset);
+	mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+	if (flag)
+		mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+	writel(mask_bits, hwif->intr_regs_base + offset);
+}
+EXPORT_SYMBOL(hinic_set_msix_state);
+
+/* Mask every MSI-X vector of this function (used during early init) */
+static void disable_all_msix(struct hinic_hwdev *hwdev)
+{
+	u16 num_irqs = hwdev->hwif->attr.num_irqs;
+	u16 i;
+
+	for (i = 0; i < num_irqs; i++)
+		hinic_set_msix_state(hwdev, i, HINIC_MSIX_DISABLE);
+}
+
+/**
+ * wait_until_doorbell_flush_states - poll until the doorbell gate reaches
+ * the requested state
+ * @hwif: the hardware interface of a pci function device
+ * @states: doorbell state to wait for
+ * Return: 0 - reached, -EFAULT - NULL hwif or ~60s timeout
+ */
+int wait_until_doorbell_flush_states(struct hinic_hwif *hwif,
+				     enum hinic_doorbell_ctrl states)
+{
+	enum hinic_doorbell_ctrl db_ctrl;
+	u32 cnt = 0;
+
+	if (!hwif)
+		return -EFAULT;
+
+	while (cnt < HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) {
+		db_ctrl = hinic_get_doorbell_ctrl_status(hwif);
+		if (db_ctrl == states)
+			return 0;
+
+		usleep_range(900, 1000);
+		cnt++;
+	}
+
+	return -EFAULT;
+}
+EXPORT_SYMBOL(wait_until_doorbell_flush_states);
+
+/* Poll until BOTH doorbell and outbound gates are enabled (init gate).
+ * Return: 0 - enabled, -EFAULT - timeout.
+ */
+static int wait_until_doorbell_and_outbound_enabled(struct hinic_hwif *hwif)
+{
+	enum hinic_doorbell_ctrl db_ctrl;
+	enum hinic_outbound_ctrl outbound_ctrl;
+	u32 cnt = 0;
+
+	while (cnt < HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) {
+		db_ctrl = hinic_get_doorbell_ctrl_status(hwif);
+		outbound_ctrl = hinic_get_outbound_ctrl_status(hwif);
+
+		if (outbound_ctrl == ENABLE_OUTBOUND &&
+		    db_ctrl == ENABLE_DOORBELL)
+			return 0;
+
+		usleep_range(900, 1000);
+		cnt++;
+	}
+
+	return -EFAULT;
+}
+
+/* Dump the firmware self-test result register to aid debugging when the
+ * hwif never becomes ready. Only PF/PPF on PCI interface 0 print it;
+ * a link-down readback is reported instead.
+ */
+static void __print_selftest_reg(struct hinic_hwdev *hwdev)
+{
+	u32 addr, attr0, attr1;
+
+	addr = HINIC_CSR_FUNC_ATTR1_ADDR;
+	attr1 = hinic_hwif_read_reg(hwdev->hwif, addr);
+
+	if (attr1 == HINIC_PCIE_LINK_DOWN) {
+		sdk_err(hwdev->dev_hdl, "PCIE is link down\n");
+		return;
+	}
+
+	addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+	attr0 = hinic_hwif_read_reg(hwdev->hwif, addr);
+	if (HINIC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF &&
+	    !HINIC_AF0_GET(attr0, PCI_INTF_IDX))
+		sdk_err(hwdev->dev_hdl, "Selftest reg: 0x%08x\n",
+			hinic_hwif_read_reg(hwdev->hwif,
+					    HINIC_SELFTEST_RESULT));
+}
+
+/**
+ * hinic_init_hwif - initialize the hw interface
+ * @hwdev: the pointer to hw device
+ * @cfg_reg_base: configuration base address (BAR0)
+ * @intr_reg_base: interrupt register base address
+ * @db_base_phy: doorbell physical base address
+ * @db_base: doorbell mapped base address
+ * @dwqe_mapping: direct-WQE mapping (io_mapping, or plain address on arm64)
+ * Return: 0 - success, negative - failure
+ *
+ * NOTE(review): on the error path hwdev->hwif is left pointing at the
+ * freed hwif; callers must not touch it after a failure.
+ */
+int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base,
+		    void *intr_reg_base, u64 db_base_phy,
+		    void *db_base, void *dwqe_mapping)
+{
+	struct hinic_hwif *hwif;
+	int err;
+
+	hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
+	if (!hwif)
+		return -ENOMEM;
+
+	hwdev->hwif = hwif;
+	hwif->pdev = hwdev->pcidev_hdl;
+
+	hwif->cfg_regs_base = cfg_reg_base;
+	hwif->intr_regs_base = intr_reg_base;
+
+	hwif->db_base_phy = db_base_phy;
+	hwif->db_base = db_base;
+	hwif->dwqe_mapping = dwqe_mapping;
+
+	/* db_size depends on chip mode (normal vs bmgw/vmgw) */
+	hwif->db_size = hinic_get_db_size(cfg_reg_base, &hwif->chip_mode);
+
+	sdk_info(hwdev->dev_hdl, "Doorbell size: 0x%x, chip mode: %d\n",
+		 hwif->db_size, hwif->chip_mode);
+
+	init_db_area_idx(hwif);
+
+	err = wait_hwif_ready(hwdev);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Chip status is not ready\n");
+		__print_selftest_reg(hwdev);
+		goto hwif_ready_err;
+	}
+
+	get_hwif_attr(hwif);
+
+	err = wait_until_doorbell_and_outbound_enabled(hwif);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled\n");
+		goto hwif_ready_err;
+	}
+
+	/* PFs take part in the PPF election; the winner also elects MPF */
+	if (!HINIC_IS_VF(hwdev)) {
+		set_ppf(hwif);
+
+		if (HINIC_IS_PPF(hwdev))
+			set_mpf(hwif);
+
+		get_mpf(hwif);
+	}
+
+	disable_all_msix(hwdev);
+	/* disable mgmt cpu report any event */
+	hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+
+	sdk_info(hwdev->dev_hdl, "global_func_idx: %d, func_type: %d, host_id: %d, ppf: %d, mpf: %d\n",
+		 hwif->attr.func_global_idx, hwif->attr.func_type,
+		 hwif->attr.pci_intf_idx, hwif->attr.ppf_idx,
+		 hwif->attr.mpf_idx);
+
+	return 0;
+
+hwif_ready_err:
+	kfree(hwif);
+
+	return err;
+}
+
+/**
+ * hinic_free_hwif - free the hw interface
+ * @hwdev: the pointer to hw device
+ */
+void hinic_free_hwif(struct hinic_hwdev *hwdev)
+{
+	kfree(hwdev->hwif);
+}
+
+/**
+ * hinic_dma_alloc_coherent_align - coherent DMA allocation whose physical
+ * address is aligned to @align
+ * @dev_hdl: device handle for dma_alloc_coherent()
+ * @size: payload size in bytes
+ * @align: required physical alignment (power of two)
+ * @flag: GFP flags
+ * @mem_align: output; both the original and the aligned addresses, so the
+ * caller uses align_* and passes the struct back to
+ * hinic_dma_free_coherent_align() to free ori_*
+ * Return: 0 - success, -ENOMEM - allocation failed
+ *
+ * First tries a plain allocation; only if its physical address is not
+ * already aligned does it reallocate with @align bytes of slack.
+ */
+int hinic_dma_alloc_coherent_align(void *dev_hdl, u64 size, u64 align,
+				   unsigned int flag,
+				   struct hinic_dma_addr_align *mem_align)
+{
+	void *vaddr, *align_vaddr;
+	dma_addr_t paddr, align_paddr;
+	u64 real_size = size;
+
+	vaddr = dma_alloc_coherent(dev_hdl, real_size, &paddr, flag);
+	if (!vaddr)
+		return -ENOMEM;
+
+	align_paddr = ALIGN(paddr, align);
+	/* already aligned - keep the first allocation */
+	if (align_paddr == paddr) {
+		align_vaddr = vaddr;
+		goto out;
+	}
+
+	dma_free_coherent(dev_hdl, real_size, vaddr, paddr);
+
+	/* realloc memory for align */
+	real_size = size + align;
+	vaddr = dma_alloc_coherent(dev_hdl, real_size, &paddr, flag);
+	if (!vaddr)
+		return -ENOMEM;
+
+	align_paddr = ALIGN(paddr, align);
+	/* virtual address shares the same offset as the physical one */
+	align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr));
+
+out:
+	mem_align->real_size = (u32)real_size;
+	mem_align->ori_vaddr = vaddr;
+	mem_align->ori_paddr = paddr;
+	mem_align->align_vaddr = align_vaddr;
+	mem_align->align_paddr = align_paddr;
+
+	return 0;
+}
+
+/* Free a buffer obtained from hinic_dma_alloc_coherent_align(); must be
+ * given the ori_* addresses, not the aligned ones.
+ */
+void hinic_dma_free_coherent_align(void *dev_hdl,
+				   struct hinic_dma_addr_align *mem_align)
+{
+	dma_free_coherent(dev_hdl, mem_align->real_size,
+			  mem_align->ori_vaddr, mem_align->ori_paddr);
+}
+
+/* Global function index cached in hwif->attr; 0 if hwdev is NULL */
+u16 hinic_global_func_id(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.func_global_idx;
+}
+EXPORT_SYMBOL(hinic_global_func_id);
+
+/**
+ * get function id from register, used by sriov hot migration process
+ * @hwdev: the pointer to hw device
+ *
+ * NOTE(review): unlike the cached accessors above, this does not check
+ * hwdev for NULL before dereferencing — callers must guarantee it.
+ */
+u16 hinic_global_func_id_hw(void *hwdev)
+{
+	u32 addr, attr0;
+	struct hinic_hwdev *dev;
+
+	dev = (struct hinic_hwdev *)hwdev;
+	addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+	attr0 = hinic_hwif_read_reg(dev->hwif, addr);
+
+	return HINIC_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+}
+
+/* Wait (20ms per cycle, up to PIPE_CYCLE_MAX cycles) for the migration
+ * busy flag to clear.
+ * Return: 0 - clear, -ETIMEDOUT - still busy after the last cycle.
+ */
+static int func_busy_state_check(struct hinic_hwdev *hwdev)
+{
+	u32 func_state;
+	int cycle;
+
+	/* set BUSY before src vm suspend and clear it before dst vm resume */
+	cycle = PIPE_CYCLE_MAX;
+	func_state = hinic_func_busy_state_get(hwdev);
+	while (func_state && cycle) {
+		msleep(20);
+		cycle--;
+		if (!cycle) {
+			sdk_err(hwdev->dev_hdl, "busy_state suspend timeout");
+			return -ETIMEDOUT;
+		}
+
+		func_state = hinic_func_busy_state_get(hwdev);
+	}
+
+	return 0;
+}
+
+/* Take shared VF ownership for the hot-migration window: bump the
+ * refcount and set the own bit under func_sem; if the device reports
+ * busy, back out, wait for the busy flag to clear, and retry.
+ * No-op (returns 0) for PFs.
+ */
+int hinic_func_own_get(void *hwdev)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+	u32 func_state;
+	int err;
+
+	if (!HINIC_IS_VF(dev))
+		return 0;
+
+restart:
+	down(&dev->func_sem);
+
+	dev->func_ref++;
+	hinic_func_own_bit_set(dev, 1);
+
+	func_state = hinic_func_busy_state_get(hwdev);
+	if (func_state) {
+		/* device busy: undo our claim before waiting */
+		dev->func_ref--;
+		if (dev->func_ref == 0)
+			hinic_func_own_bit_set(dev, 0);
+
+		up(&dev->func_sem);
+		err = func_busy_state_check(dev);
+		if (err)
+			return err;
+		goto restart;
+	}
+
+	up(&dev->func_sem);
+	return 0;
+}
+
+/* Drop one ownership reference; clears the own bit when the last
+ * reference goes away. No-op for PFs.
+ */
+void hinic_func_own_free(void *hwdev)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+
+	if (!HINIC_IS_VF(dev))
+		return;
+
+	down(&dev->func_sem);
+	dev->func_ref--;
+	if (dev->func_ref == 0)
+		hinic_func_own_bit_set(dev, 0);
+
+	up(&dev->func_sem);
+}
+
+/**
+ * get function id, used by sriov hot migration process.
+ * @hwdev: the pointer to hw device
+ * @func_id: output, the function id
+ * Return: 0 - success, negative - busy-state wait timed out
+ *
+ * PFs return the cached index; VFs must read the chip register because
+ * the index can change across a migration, and may only do so once the
+ * migration busy flag is clear.
+ */
+int hinic_global_func_id_get(void *hwdev, u16 *func_id)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+	int err;
+
+	/* only vf get func_id from chip reg for sriov migrate */
+	if (!HINIC_IS_VF(dev)) {
+		*func_id = hinic_global_func_id(hwdev);
+		return 0;
+	}
+
+	err = func_busy_state_check(dev);
+	if (err)
+		return err;
+
+	*func_id = hinic_global_func_id_hw(dev);
+	return 0;
+}
+
+/* ---- Cached function-attribute accessors ----
+ * Each returns a field of hwif->attr decoded at init time, or 0 when
+ * hwdev is NULL (callers cannot distinguish "none" from index 0).
+ */
+
+/* Number of MSI-X interrupts granted to this function */
+u16 hinic_intr_num(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.num_irqs;
+}
+EXPORT_SYMBOL(hinic_intr_num);
+
+/* Parent PF index of a VF (cached P2P_IDX field) */
+u8 hinic_pf_id_of_vf(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.port_to_port_idx;
+}
+EXPORT_SYMBOL(hinic_pf_id_of_vf);
+
+/* Parent PF index read live from attr0.
+ * NOTE(review): no NULL check on hwdev, unlike the cached accessor.
+ */
+u16 hinic_pf_id_of_vf_hw(void *hwdev)
+{
+	u32 addr, attr0;
+	struct hinic_hwdev *dev;
+
+	dev = (struct hinic_hwdev *)hwdev;
+	addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+	attr0 = hinic_hwif_read_reg(dev->hwif, addr);
+
+	return HINIC_AF0_GET(attr0, P2P_IDX);
+}
+
+/* PCIe interface (host) index of this function */
+u8 hinic_pcie_itf_id(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.pci_intf_idx;
+}
+EXPORT_SYMBOL(hinic_pcie_itf_id);
+
+/* VF index within its parent PF */
+u8 hinic_vf_in_pf(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.vf_in_pf;
+}
+EXPORT_SYMBOL(hinic_vf_in_pf);
+
+/* Function type (VF / PF / PPF as decided by the PPF election) */
+enum func_type hinic_func_type(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.func_type;
+}
+EXPORT_SYMBOL(hinic_func_type);
+
+/* Number of completion event queues granted to this function */
+u8 hinic_ceq_num(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.num_ceqs;
+}
+EXPORT_SYMBOL(hinic_ceq_num);
+
+/* Number of DMA attribute table entries for this function */
+u8 hinic_dma_attr_entry_num(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.num_dma_attr;
+}
+EXPORT_SYMBOL(hinic_dma_attr_entry_num);
+
+/* Global VF id offset of this PF's first VF */
+u16 hinic_glb_pf_vf_offset(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.global_vf_id_of_pf;
+}
+EXPORT_SYMBOL(hinic_glb_pf_vf_offset);
+
+/* Index of the elected master PF (MPF) */
+u8 hinic_mpf_idx(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.mpf_idx;
+}
+EXPORT_SYMBOL(hinic_mpf_idx);
+
+/* Index of the elected primary PF (PPF) */
+u8 hinic_ppf_idx(void *hwdev)
+{
+	struct hinic_hwif *hwif;
+
+	if (!hwdev)
+		return 0;
+
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	return hwif->attr.ppf_idx;
+}
+EXPORT_SYMBOL(hinic_ppf_idx);
+
+/* Chip-mode field encoded by firmware in CEQ0 control register, bits 26..29 */
+#define CEQ_CTRL_0_CHIP_MODE_SHIFT 26
+#define CEQ_CTRL_0_CHIP_MODE_MASK 0xFU
+#define CEQ_CTRL_0_GET(val, member) \
+	(((val) >> CEQ_CTRL_0_##member##_SHIFT) & \
+	 CEQ_CTRL_0_##member##_MASK)
+
+/**
+ * hinic_get_db_size - get db size ceq ctrl: bit26~29: uP write vf mode is
+ * normal(0x0), bmgw(0x1) or vmgw(0x2) and normal mode db size is 512k,
+ * bmgw or vmgw mode db size is 256k
+ * @cfg_reg_base: pointer to cfg_reg_base
+ * @chip_mode: output, decoded chip mode
+ * Return: doorbell BAR size in bytes
+ *
+ * Reads raw BAR0 directly (hwif is not built yet at this point), hence
+ * the explicit be32_to_cpu(readl()) instead of hinic_hwif_read_reg().
+ */
+u32 hinic_get_db_size(void *cfg_reg_base, enum hinic_chip_mode *chip_mode)
+{
+	u32 attr0, ctrl0;
+
+	attr0 = be32_to_cpu(readl((u8 __iomem *)cfg_reg_base +
+				  HINIC_CSR_FUNC_ATTR0_ADDR));
+
+	/* PF is always normal mode & db size is 512K */
+	if (HINIC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF) {
+		*chip_mode = CHIP_MODE_NORMAL;
+		return HINIC_DB_DWQE_SIZE;
+	}
+
+	ctrl0 = be32_to_cpu(readl((u8 __iomem *)cfg_reg_base +
+				  HINIC_CSR_CEQ_CTRL_0_ADDR(0)));
+
+	*chip_mode = CEQ_CTRL_0_GET(ctrl0, CHIP_MODE);
+
+	switch (*chip_mode) {
+	case CHIP_MODE_VMGW:
+	case CHIP_MODE_BMGW:
+		return HINIC_GW_VF_DB_SIZE;
+	default:
+		return HINIC_DB_DWQE_SIZE;
+	}
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwif.h b/drivers/net/ethernet/huawei/hinic/hinic_hwif.h
new file mode 100644
index 000000000000..568890c4aa08
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hwif.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0*/
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_HWIF_H
+#define HINIC_HWIF_H
+
+#include "hinic_hwdev.h"
+
+#define HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000
+
+/* Ring-style free pool of doorbell page indexes (see get_db_idx()) */
+struct hinic_free_db_area {
+	u32 db_idx[HINIC_DB_MAX_AREAS];
+
+	u32 num_free;	/* slots currently available */
+
+	u32 alloc_pos;	/* ring head, advanced on allocation */
+	u32 return_pos;	/* ring tail, advanced on free */
+
+	/* spinlock for allocating doorbell area */
+	spinlock_t idx_lock;
+};
+
+/* Function attributes decoded once from CSR attr0..attr2 at init */
+struct hinic_func_attr {
+	u16 func_global_idx;
+	u8 port_to_port_idx;	/* for a VF: its parent PF index */
+	u8 pci_intf_idx;
+	u8 vf_in_pf;		/* VF index within the parent PF */
+	enum func_type func_type;
+
+	u8 mpf_idx;	/* elected master PF */
+
+	u8 ppf_idx;	/* elected primary PF */
+
+	u16 num_irqs;		/* max: 2 ^ 15 */
+	u8 num_aeqs;		/* max: 2 ^ 3 */
+	u8 num_ceqs;		/* max: 2 ^ 7 */
+
+	u8 num_dma_attr;	/* max: 2 ^ 6 */
+
+	u16 global_vf_id_of_pf;
+};
+
+/* Per-PCI-function hardware interface: mapped BARs plus decoded attrs */
+struct hinic_hwif {
+	u8 __iomem *cfg_regs_base;	/* BAR0: CSR space (big-endian) */
+	u8 __iomem *intr_regs_base;	/* MSI-X vector control space */
+	u64 db_base_phy;
+	u8 __iomem *db_base;
+
+	/* arm64 maps the dwqe area directly; others use an io_mapping */
+#if defined(__aarch64__)
+	void __iomem *dwqe_mapping;
+#else
+	struct io_mapping *dwqe_mapping;
+#endif
+	struct hinic_free_db_area free_db_area;
+
+	struct hinic_func_attr attr;
+
+	void *pdev;
+	enum hinic_chip_mode chip_mode;
+	u32 db_size;	/* set from hinic_get_db_size() */
+};
+
+/* Bookkeeping for hinic_dma_alloc_coherent_align(): the caller uses the
+ * align_* addresses; the ori_* pair is what must be passed back to free.
+ */
+struct hinic_dma_addr_align {
+	u32 real_size;
+
+	void *ori_vaddr;
+	dma_addr_t ori_paddr;
+
+	void *align_vaddr;
+	dma_addr_t align_paddr;
+};
+
+u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg);
+
+void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val);
+
+void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status);
+
+enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif);
+
+enum hinic_doorbell_ctrl
+ hinic_get_doorbell_ctrl_status(struct hinic_hwif *hwif);
+
+enum hinic_outbound_ctrl
+ hinic_get_outbound_ctrl_status(struct hinic_hwif *hwif);
+
+void hinic_enable_doorbell(struct hinic_hwif *hwif);
+
+void hinic_disable_doorbell(struct hinic_hwif *hwif);
+
+void hinic_enable_outbound(struct hinic_hwif *hwif);
+
+void hinic_disable_outbound(struct hinic_hwif *hwif);
+
+int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base,
+ void *intr_reg_base, u64 db_base_phy,
+ void *db_base, void *dwqe_mapping);
+
+void hinic_free_hwif(struct hinic_hwdev *hwdev);
+
+int wait_until_doorbell_flush_states(struct hinic_hwif *hwif,
+ enum hinic_doorbell_ctrl states);
+
+int hinic_dma_alloc_coherent_align(void *dev_hdl, u64 size, u64 align,
+ unsigned int flag,
+ struct hinic_dma_addr_align *mem_align);
+
+void hinic_dma_free_coherent_align(void *dev_hdl,
+ struct hinic_dma_addr_align *mem_align);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.c b/drivers/net/ethernet/huawei/hinic/hinic_lld.c
new file mode 100644
index 000000000000..76ca3fb85788
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.c
@@ -0,0 +1,2923 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/rtc.h>
+#include <linux/aer.h>
+#include <linux/debugfs.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw.h"
+#include "hinic_lld.h"
+#include "hinic_pci_id_tbl.h"
+#include "hinic_nic_dev.h"
+#include "hinic_sriov.h"
+#include "hinic_dbgtool_knl.h"
+#include "hinic_nictool.h"
+
+#define HINIC_PCI_CFG_REG_BAR 0
+#define HINIC_PCI_INTR_REG_BAR 2
+#define HINIC_PCI_DB_BAR 4
+#define HINIC_PCI_VENDOR_ID 0x19e5
+
+#define SELF_TEST_BAR_ADDR_OFFSET 0x883c
+
+#define HINIC_SECOND_BASE 1000
+#define HINIC_SYNC_YEAR_OFFSET 1900
+#define HINIC_SYNC_MONTH_OFFSET 1
+#define HINIC_MINUTE_BASE 60
+#define HINIC_WAIT_TOOL_CNT_TIMEOUT 10000
+#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
+
+#define HINIC_DRV_DESC "Huawei(R) Intelligent Network Interface Card Driver"
+#define HINICVF_DRV_DESC "Huawei(R) Intelligent Virtual Function Network Driver"
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION(HINIC_DRV_DESC);
+MODULE_VERSION(HINIC_DRV_VERSION);
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_PCI_IOV
+static bool disable_vf_load;
+module_param(disable_vf_load, bool, 0444);
+MODULE_PARM_DESC(disable_vf_load,
+ "Disable virtual functions probe or not - default is false");
+#endif /* CONFIG_PCI_IOV */
+
+enum {
+ HINIC_FUNC_IN_REMOVE = BIT(0),
+ HINIC_FUNC_PRB_ERR = BIT(1),
+ HINIC_FUNC_PRB_DELAY = BIT(2),
+};
+
+/* Structure pcidev private: per-PCI-function adapter context, linked into
+ * its card's chip_node function list.
+ */
+struct hinic_pcidev {
+	struct pci_dev *pcidev;
+	void *hwdev;		/* SDK-layer device handle */
+	struct card_node *chip_node;	/* owning physical card */
+	struct hinic_lld_dev lld_dev;
+	/* Record the service object address,
+	 * such as hinic_dev and toe_dev, fc_dev
+	 */
+	void *uld_dev[SERVICE_T_MAX];
+	/* Record the service object name */
+	char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ];
+	/* It is a the global variable for driver to manage
+	 * all function device linked list
+	 */
+	struct list_head node;
+
+	void __iomem *cfg_reg_base;
+	void __iomem *intr_reg_base;
+	u64 db_base_phy;
+	void __iomem *db_base;
+
+	/* direct-WQE area: plain iomem mapping on arm64, io_mapping elsewhere */
+#if defined(__aarch64__)
+	void __iomem *dwqe_mapping;
+#else
+	struct io_mapping *dwqe_mapping;
+#endif
+	/* lock for attach/detach uld */
+	struct mutex pdev_mutex;
+	struct hinic_sriov_info sriov_info;
+
+	u32 init_state;		/* HINIC_INIT_STATE_* progress marker */
+	/* setted when uld driver processing event */
+	unsigned long state;
+	struct pci_device_id id;
+
+	unsigned long flag;	/* HINIC_FUNC_IN_REMOVE / PRB_ERR / PRB_DELAY bits */
+
+	struct work_struct slave_nic_work;
+	struct workqueue_struct *slave_nic_init_workq;
+	struct delayed_work slave_nic_init_dwork;
+	enum hinic_chip_mode chip_mode;
+	bool nic_cur_enable;
+	bool nic_des_enable;
+
+	struct timer_list syncfw_time_timer;	/* periodic firmware time sync */
+	int card_id;
+};
+
+#define HINIC_EVENT_PROCESS_TIMEOUT 10000
+
+#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 1 : 0)
+#define SET_BIT(num, n) ((num) | (1UL << (n)))
+#define CLEAR_BIT(num, n) ((num) & (~(1UL << (n))))
+
+#define MAX_CARD_ID 64
+static u64 card_bit_map;
+LIST_HEAD(g_hinic_chip_list);
+struct hinic_uld_info g_uld_info[SERVICE_T_MAX];
+static const char *s_uld_name[SERVICE_T_MAX] = {
+ "nic", "ovs", "roce", "toe", "iwarp", "fc", "fcoe", "migrate"};
+
+enum hinic_lld_status {
+ HINIC_NODE_CHANGE = BIT(0),
+};
+
+struct hinic_lld_lock {
+ /* lock for chip list */
+ struct mutex lld_mutex;
+ unsigned long status;
+ atomic_t dev_ref_cnt;
+};
+
+static struct hinic_lld_lock g_lld_lock;
+
+#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10minutes */
+#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10minutes */
+#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2minutes */
+
+/* node in chip_node will changed, tools or driver can't get node
+ * during this situation
+ */
+/* Acquire exclusive rights to modify the chip/function lists.
+ *
+ * First waits (up to WAIT_LLD_DEV_NODE_CHANGED ms) to win the
+ * HINIC_NODE_CHANGE bit, then waits (up to WAIT_LLD_DEV_REF_CNT_EMPTY ms)
+ * for all lld_dev_hold() readers to drain.  On timeout it only warns and
+ * proceeds — callers get best-effort exclusivity, not a guarantee.
+ * Paired with lld_unlock_chip_node().
+ */
+static void lld_lock_chip_node(void)
+{
+	u32 loop_cnt;
+
+	mutex_lock(&g_lld_lock.lld_mutex);
+
+	loop_cnt = 0;
+	while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) {
+		if (!test_and_set_bit(HINIC_NODE_CHANGE, &g_lld_lock.status))
+			break;
+
+		loop_cnt++;
+
+		/* each pass sleeps ~1ms, so 10000 passes ~= 10s */
+		if (loop_cnt % 10000 == 0)
+			pr_warn("Wait for lld node change complete for %us\n",
+				loop_cnt / 1000);
+
+		usleep_range(900, 1000);
+	}
+
+	if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED)
+		pr_warn("Wait for lld node change complete timeout when try to get lld lock\n");
+
+	loop_cnt = 0;
+	while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) {
+		if (!atomic_read(&g_lld_lock.dev_ref_cnt))
+			break;
+
+		loop_cnt++;
+
+		if (loop_cnt % 10000 == 0)
+			pr_warn("Wait for lld dev unused for %us, reference count: %d\n",
+				loop_cnt / 1000,
+				atomic_read(&g_lld_lock.dev_ref_cnt));
+
+		usleep_range(900, 1000);
+	}
+
+	if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY)
+		pr_warn("Wait for lld dev unused timeout\n");
+
+	mutex_unlock(&g_lld_lock.lld_mutex);
+}
+
+/* Release the list-change right taken by lld_lock_chip_node(). */
+static void lld_unlock_chip_node(void)
+{
+	clear_bit(HINIC_NODE_CHANGE, &g_lld_lock.status);
+}
+
+/* When tools or other drivers want to get node of chip_node, use this function
+ * to prevent node be freed
+ */
+/* Take a reader reference on the chip/function lists so that no node is
+ * freed while the caller walks them.  Waits (up to WAIT_LLD_DEV_HOLD_TIMEOUT
+ * ms) for any in-progress list change to finish; on timeout it warns and
+ * proceeds anyway.  Paired with lld_dev_put().
+ */
+static void lld_dev_hold(void)
+{
+	u32 loop_cnt = 0;
+
+	/* ensure there have not any chip node in changing */
+	mutex_lock(&g_lld_lock.lld_mutex);
+
+	while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) {
+		if (!test_bit(HINIC_NODE_CHANGE, &g_lld_lock.status))
+			break;
+
+		loop_cnt++;
+
+		/* each pass sleeps ~1ms, so 10000 passes ~= 10s */
+		if (loop_cnt % 10000 == 0)
+			pr_warn("Wait lld node change complete for %us\n",
+				loop_cnt / 1000);
+
+		usleep_range(900, 1000);
+	}
+
+	if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT)
+		pr_warn("Wait lld node change complete timeout when try to hold lld dev\n");
+
+	atomic_inc(&g_lld_lock.dev_ref_cnt);
+
+	mutex_unlock(&g_lld_lock.lld_mutex);
+}
+
+/* Drop a reader reference taken by lld_dev_hold(). */
+static void lld_dev_put(void)
+{
+	atomic_dec(&g_lld_lock.dev_ref_cnt);
+}
+
+/* One-time initialization of the global lld lock state. */
+static void hinic_lld_lock_init(void)
+{
+	mutex_init(&g_lld_lock.lld_mutex);
+	atomic_set(&g_lld_lock.dev_ref_cnt, 0);
+}
+
+/* Reference count of in-flight tool (ioctl/nictool) users of the driver. */
+static atomic_t tool_used_cnt;
+
+/* Mark one more tool user active. */
+void hinic_tool_cnt_inc(void)
+{
+	atomic_inc(&tool_used_cnt);
+}
+
+/* Mark one tool user finished. */
+void hinic_tool_cnt_dec(void)
+{
+	atomic_dec(&tool_used_cnt);
+}
+
+/* Bind an upper-layer driver of @type to this PCI function by calling its
+ * probe callback and recording the returned service object.
+ *
+ * Returns 0 on success, when already attached, or when the device does not
+ * support the service; a negative errno otherwise.  Serialized against
+ * detach_uld() by dev->pdev_mutex.
+ */
+static int attach_uld(struct hinic_pcidev *dev, enum hinic_service_type type,
+		      struct hinic_uld_info *uld_info)
+{
+	void *uld_dev = NULL;
+	int err;
+
+	mutex_lock(&dev->pdev_mutex);
+
+	if (dev->init_state < HINIC_INIT_STATE_HWDEV_INITED) {
+		sdk_err(&dev->pcidev->dev, "SDK init failed, can not attach uld\n");
+		err = -EFAULT;
+		goto out_unlock;
+	}
+
+	/* already attached: report success to keep attachment idempotent */
+	if (dev->uld_dev[type]) {
+		sdk_err(&dev->pcidev->dev,
+			"%s driver has attached to pcie device\n",
+			s_uld_name[type]);
+		err = 0;
+		goto out_unlock;
+	}
+
+	if ((hinic_get_func_mode(dev->hwdev) == FUNC_MOD_NORMAL_HOST) &&
+	    type == SERVICE_T_OVS && !hinic_support_ovs(dev->hwdev, NULL)) {
+		sdk_warn(&dev->pcidev->dev, "Dev not support %s\n",
+			 s_uld_name[type]);
+		err = 0;
+		goto out_unlock;
+	}
+
+	err = uld_info->probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name[type]);
+	if (err || !uld_dev) {
+		sdk_err(&dev->pcidev->dev,
+			"Failed to add object for %s driver to pcie device\n",
+			s_uld_name[type]);
+		/* probe may return 0 yet hand back no object; don't let that
+		 * look like a successful attach to our caller
+		 */
+		if (!err)
+			err = -EFAULT;
+		goto out_unlock;
+	}
+
+	dev->uld_dev[type] = uld_dev;
+	mutex_unlock(&dev->pdev_mutex);
+
+	sdk_info(&dev->pcidev->dev,
+		 "Attach %s driver to pcie device succeed\n", s_uld_name[type]);
+	return 0;
+
+out_unlock:
+	mutex_unlock(&dev->pdev_mutex);
+
+	return err;
+}
+
+/* Unbind the upper-layer driver of @type from this PCI function.
+ *
+ * Waits up to HINIC_EVENT_PROCESS_TIMEOUT ms to win the per-type event bit
+ * so remove() does not race an in-flight event callback; on timeout the
+ * remove still proceeds and the bit is deliberately left set.
+ */
+static void detach_uld(struct hinic_pcidev *dev, enum hinic_service_type type)
+{
+	struct hinic_uld_info *uld_info = &g_uld_info[type];
+	u32 cnt = 0;
+
+	mutex_lock(&dev->pdev_mutex);
+	if (!dev->uld_dev[type]) {
+		mutex_unlock(&dev->pdev_mutex);
+		return;
+	}
+
+	while (cnt < HINIC_EVENT_PROCESS_TIMEOUT) {
+		if (!test_and_set_bit(type, &dev->state))
+			break;
+		usleep_range(900, 1000);
+		cnt++;
+	}
+
+	uld_info->remove(&dev->lld_dev, dev->uld_dev[type]);
+	dev->uld_dev[type] = NULL;
+	/* only clear the bit if we actually acquired it above */
+	if (cnt < HINIC_EVENT_PROCESS_TIMEOUT)
+		clear_bit(type, &dev->state);
+
+	sdk_info(&dev->pcidev->dev,
+		 "Detach %s driver from pcie device succeed\n",
+		 s_uld_name[type]);
+	mutex_unlock(&dev->pdev_mutex);
+}
+
+/* Attach every registered upper-layer driver to @dev.
+ * Starts at SERVICE_T_OVS, i.e. the NIC service is deliberately excluded —
+ * presumably it is attached elsewhere in the probe path; confirm at caller.
+ */
+static void attach_ulds(struct hinic_pcidev *dev)
+{
+	enum hinic_service_type type;
+
+	for (type = SERVICE_T_OVS; type < SERVICE_T_MAX; type++) {
+		if (g_uld_info[type].probe)
+			attach_uld(dev, type, &g_uld_info[type]);
+	}
+}
+
+/* Detach all registered upper-layer drivers from @dev in reverse order of
+ * attachment; SERVICE_T_NIC itself is excluded, mirroring attach_ulds().
+ */
+static void detach_ulds(struct hinic_pcidev *dev)
+{
+	enum hinic_service_type type;
+
+	for (type = SERVICE_T_MAX - 1; type > SERVICE_T_NIC; type--) {
+		if (g_uld_info[type].probe)
+			detach_uld(dev, type);
+	}
+}
+
+/* Register an upper-layer driver of @type and immediately attach it to all
+ * PCI functions already present on all cards.
+ *
+ * @uld_info must provide both probe and remove callbacks; it is copied, so
+ * the caller's storage need not outlive this call.
+ * Returns 0 on success, -EINVAL on bad arguments or double registration.
+ * Per-device attach failures are logged but do not fail registration.
+ */
+int hinic_register_uld(enum hinic_service_type type,
+		       struct hinic_uld_info *uld_info)
+{
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	if (type >= SERVICE_T_MAX) {
+		pr_err("Unknown type %d of up layer driver to register\n",
+		       type);
+		return -EINVAL;
+	}
+
+	if (!uld_info || !uld_info->probe || !uld_info->remove) {
+		pr_err("Invalid information of %s driver to register\n",
+		       s_uld_name[type]);
+		return -EINVAL;
+	}
+
+	lld_dev_hold();
+
+	if (g_uld_info[type].probe) {
+		pr_err("%s driver has registered\n", s_uld_name[type]);
+		lld_dev_put();
+		return -EINVAL;
+	}
+
+	memcpy(&g_uld_info[type], uld_info, sizeof(*uld_info));
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			if (attach_uld(dev, type, uld_info)) {
+				sdk_err(&dev->pcidev->dev,
+					"Attach %s driver to pcie device failed\n",
+					s_uld_name[type]);
+				continue;
+			}
+		}
+	}
+
+	lld_dev_put();
+
+	pr_info("Register %s driver succeed\n", s_uld_name[type]);
+	return 0;
+}
+EXPORT_SYMBOL(hinic_register_uld);
+
+/* Unregister the upper-layer driver of @type: detach it from every PCI
+ * function on every card, then clear its callback slot.
+ */
+void hinic_unregister_uld(enum hinic_service_type type)
+{
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+	struct hinic_uld_info *uld_info;
+
+	if (type >= SERVICE_T_MAX) {
+		pr_err("Unknown type %d of up layer driver to unregister\n",
+		       type);
+		return;
+	}
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			detach_uld(dev, type);
+		}
+	}
+
+	uld_info = &g_uld_info[type];
+	memset(uld_info, 0, sizeof(*uld_info));
+	lld_dev_put();
+}
+EXPORT_SYMBOL(hinic_unregister_uld);
+
+#define HINIC_SYNFW_TIME_PERIOD (60 * 60 * 1000)
+
+/* Periodic timer callback: push the current wall-clock time (in ms) to the
+ * firmware asynchronously, then re-arm for another HINIC_SYNFW_TIME_PERIOD.
+ */
+static void hinic_syncfw_timer_handler(struct timer_list *t)
+{
+	struct hinic_pcidev *pci_adapter = from_timer(pci_adapter, t,
+						      syncfw_time_timer);
+	u64 tv_msec;
+
+	tv_msec = ktime_to_ms(ktime_get_real());
+
+	hinic_sync_time_async(pci_adapter->hwdev, tv_msec);
+	mod_timer(&pci_adapter->syncfw_time_timer,
+		  jiffies + msecs_to_jiffies(HINIC_SYNFW_TIME_PERIOD));
+}
+
+/* Start the hourly firmware time-sync timer.  Only the PPF of a
+ * normal-host-mode card runs it; all other functions return immediately.
+ */
+void hinic_init_syncfw_timer(struct hinic_pcidev *pci_adapter)
+{
+	if (hinic_get_func_mode(pci_adapter->hwdev) != FUNC_MOD_NORMAL_HOST ||
+	    hinic_func_type(pci_adapter->hwdev) != TYPE_PPF)
+		return;
+
+	timer_setup(&pci_adapter->syncfw_time_timer,
+		    hinic_syncfw_timer_handler, 0);
+
+	pci_adapter->syncfw_time_timer.expires =
+		jiffies + msecs_to_jiffies(HINIC_SYNFW_TIME_PERIOD);
+
+	add_timer(&pci_adapter->syncfw_time_timer);
+}
+
+/* Stop the firmware time-sync timer; guards mirror hinic_init_syncfw_timer()
+ * so functions that never armed the timer do not call del_timer_sync().
+ */
+void hinic_destroy_syncfw_timer(struct hinic_pcidev *pci_adapter)
+{
+	if (hinic_get_func_mode(pci_adapter->hwdev) != FUNC_MOD_NORMAL_HOST ||
+	    hinic_func_type(pci_adapter->hwdev) != TYPE_PPF)
+		return;
+
+	del_timer_sync(&pci_adapter->syncfw_time_timer);
+}
+
+/* One-shot synchronous push of the current UTC time (ms) to firmware,
+ * logging success (with the human-readable timestamp) or failure.
+ */
+static void hinic_sync_time_to_fmw(struct hinic_pcidev *pdev_pri)
+{
+	struct tm tm = {0};
+	u64 tv_msec;
+	int err;
+
+	tv_msec = ktime_to_ms(ktime_get_real());
+	err = hinic_sync_time(pdev_pri->hwdev, tv_msec);
+	if (err) {
+		sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n",
+			err);
+	} else {
+		/* tm_year counts from 1900, tm_mon from 0 — offsets fix both */
+		time64_to_tm(tv_msec / MSEC_PER_SEC, 0, &tm);
+		sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. UTC time %ld-%02d-%02d %02d:%02d:%02d.\n",
+			 tm.tm_year + HINIC_SYNC_YEAR_OFFSET,
+			 tm.tm_mon + HINIC_SYNC_MONTH_OFFSET,
+			 tm.tm_mday, tm.tm_hour,
+			 tm.tm_min, tm.tm_sec);
+	}
+}
+
+enum hinic_ver_incompat_mode {
+ /* New driver can't compat with old firmware */
+ VER_INCOMP_NEW_DRV_OLD_FW,
+ /* New Firmware can't compat with old driver */
+ VER_INCOMP_NEW_FW_OLD_DRV,
+};
+
+struct hinic_version_incompat {
+ char *version;
+ char *advise;
+ u32 incompat_mode;
+};
+
+/* Known driver/firmware compatibility breakpoints, ordered by version.
+ * Each entry names the first version introducing the incompatibility and
+ * the direction(s) in which old/new combinations break.
+ */
+struct hinic_version_incompat ver_incompat_table[] = {
+	{
+		.version = "1.2.2.0",
+		.advise = "Mechanism of cos changed",
+		.incompat_mode = BIT(VER_INCOMP_NEW_DRV_OLD_FW),
+	},
+	{
+		.version = "1.2.3.0",
+		/* typo fix: "sevice" -> "service" (shown in mismatch errors) */
+		.advise = "Driver get service mode from firmware",
+		.incompat_mode = BIT(VER_INCOMP_NEW_DRV_OLD_FW),
+	},
+};
+
+#define MAX_VER_FIELD_LEN 4
+#define MAX_VER_SPLIT_NUM 4
+/* Split a dotted version string ("a.b.c.d") into up to MAX_VER_SPLIT_NUM
+ * fields of at most MAX_VER_FIELD_LEN-1 characters each; empty fields
+ * (consecutive dots) are skipped.  *split_num receives the field count.
+ * @rst entries are assumed zero-initialized by the caller, so the copied
+ * fields stay NUL-terminated.
+ */
+static void __version_split(const char *str, int *split_num,
+			    char rst[][MAX_VER_FIELD_LEN])
+{
+	const char delim = '.';
+	const char *src;
+	int cnt = 0;
+	u16 idx, end, token_len;
+	/* hoisted: strlen() was previously recomputed in both loop
+	 * conditions on every iteration
+	 */
+	size_t len = strlen(str);
+
+	idx = 0;
+	while (idx < len) {
+		for (end = idx; end < len; end++) {
+			if (*(str + end) == delim)
+				break; /* find */
+		}
+
+		if (end != idx) {
+			token_len = min_t(u16, end - idx,
+					  MAX_VER_FIELD_LEN - 1);
+			src = str + idx;
+			memcpy(rst[cnt], src, token_len);
+			if (++cnt >= MAX_VER_SPLIT_NUM)
+				break;
+		}
+
+		idx = end + 1; /* skip delim */
+	}
+
+	*split_num = cnt;
+}
+
+/* Numerically compare two dotted version strings field by field.
+ *
+ * Returns >0 if ver1 is newer, <0 if older, 0 if equal.  A leading 'B'
+ * (legacy beta firmware naming) always compares as oldest.  Malformed
+ * versions (not exactly 4 numeric fields) compare as equal after logging.
+ */
+int hinic_version_cmp(char *ver1, char *ver2)
+{
+	char ver1_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
+	char ver2_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
+	int split1_num, split2_num;
+	int ver1_num, ver2_num;
+	int split, err;
+
+	/* To compat older firmware version */
+	if (ver1[0] == 'B')
+		return -1;
+
+	if (ver2[0] == 'B')
+		return 1;
+
+	__version_split(ver1, &split1_num, ver1_split);
+	__version_split(ver2, &split2_num, ver2_split);
+
+	if (split1_num != MAX_VER_SPLIT_NUM ||
+	    split2_num != MAX_VER_SPLIT_NUM) {
+		pr_err("Invalid version %s or %s\n", ver1, ver2);
+		return 0;
+	}
+
+	for (split = 0; split < MAX_VER_SPLIT_NUM; split++) {
+		err = kstrtoint(ver1_split[split], 0, &ver1_num);
+		err |= kstrtoint(ver2_split[split], 0, &ver2_num);
+		if (err) {
+			pr_err("Failed to parse version: %s, %s\n",
+			       ver1_split[split], ver2_split[split]);
+			return 0;
+		}
+
+		if (ver1_num > ver2_num)
+			return 1;
+		else if (ver1_num < ver2_num)
+			return -1;
+	}
+
+	return 0;
+}
+
+/* Decide whether a driver/firmware version mismatch is fatal by scanning
+ * ver_incompat_table around @start_entry (the entry matching the driver
+ * version).  Returns -EINVAL if an incompatibility rule applies, else 0.
+ */
+static int __version_mismatch(struct hinic_pcidev *pcidev, char *cur_fw_ver,
+			      char *cur_drv_ver,
+			      struct hinic_version_incompat *ver_incompat,
+			      int start_entry)
+{
+	struct hinic_version_incompat *ver_incmp_tmp;
+	int fw_ver_comp;
+	int i, num_entry;
+
+	fw_ver_comp = hinic_version_cmp(cur_fw_ver, ver_incompat->version);
+	if (fw_ver_comp <= 0) {
+		/* Firmware is older than the driver's table entry:
+		 * check if new driver is compatible with old fw
+		 */
+		for (i = start_entry; i >= 0; i--) {
+			ver_incmp_tmp = &ver_incompat_table[i];
+			if (hinic_version_cmp(cur_fw_ver,
+					      ver_incmp_tmp->version) >= 0)
+				break;	/* No need to check any further */
+
+			if (ver_incmp_tmp->incompat_mode &
+			    BIT(VER_INCOMP_NEW_DRV_OLD_FW)) {
+				sdk_err(&pcidev->pcidev->dev,
+					"Version incompatible: %s, please update firmware to %s, or use %s driver\n",
+					ver_incmp_tmp->advise,
+					cur_drv_ver, cur_fw_ver);
+				return -EINVAL;
+			}
+		}
+
+		return 0;
+	}
+
+	/* Firmware is newer: check if old driver is compatible with it */
+	num_entry = (int)sizeof(ver_incompat_table) /
+		    (int)sizeof(ver_incompat_table[0]);
+	for (i = start_entry + 1; i < num_entry; i++) {
+		ver_incmp_tmp = &ver_incompat_table[i];
+
+		if (hinic_version_cmp(cur_fw_ver, ver_incmp_tmp->version) < 0)
+			break;	/* No need to check any further */
+
+		if (ver_incmp_tmp->incompat_mode &
+		    BIT(VER_INCOMP_NEW_FW_OLD_DRV)) {
+			sdk_err(&pcidev->pcidev->dev,
+				"Version incompatible: %s, please update driver to %s, or use %s firmware\n",
+				ver_incmp_tmp->advise,
+				cur_fw_ver, cur_drv_ver);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Rewrite "a.b.c.d" in place as "a.b.c.0" so compatibility checks ignore
+ * the minor (4th) field.  Versions that do not split into exactly 4 fields
+ * are left untouched.  The rewritten string never exceeds the original
+ * length, so the in-place snprintf is safe.
+ */
+static void hinic_ignore_minor_version(char *version)
+{
+	char ver_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} };
+	int max_ver_len, split_num = 0;
+	int err;
+
+	__version_split(version, &split_num, ver_split);
+	if (split_num != MAX_VER_SPLIT_NUM)
+		return;
+
+	max_ver_len = (int)strlen(version) + 1;
+	memset(version, 0, max_ver_len);
+
+	err = snprintf(version, max_ver_len, "%s.%s.%s.0",
+		       ver_split[0], ver_split[1], ver_split[2]);
+	if (err <= 0 || err >= max_ver_len)
+		pr_err("Failed to snprintf version, function return(%d) and dest_len(%d)\n",
+		       err, max_ver_len);
+}
+
+/* Verify that this driver's version and the card's firmware version are
+ * compatible per ver_incompat_table (minor versions ignored).
+ * Returns 0 when compatible or no table entry applies; negative errno on
+ * firmware query failure or a detected incompatibility.
+ */
+static int hinic_detect_version_compatible(struct hinic_pcidev *pcidev)
+{
+	struct hinic_fw_version fw_ver = { {0} };
+	struct hinic_version_incompat *ver_incompat;
+	char drv_ver[MAX_VER_SPLIT_NUM * MAX_VER_FIELD_LEN] = {0};
+	int idx, num_entry, drv_ver_len;
+	int ver_mismatch;
+	int err;
+
+	err = hinic_get_fw_version(pcidev->hwdev, &fw_ver);
+	if (err) {
+		sdk_err(&pcidev->pcidev->dev,
+			"Failed to get firmware version\n");
+		return err;
+	}
+
+	drv_ver_len = min_t(int, (int)sizeof(drv_ver) - 1,
+			    (int)strlen(HINIC_DRV_VERSION));
+	memcpy(drv_ver, HINIC_DRV_VERSION, drv_ver_len);
+
+	sdk_info(&pcidev->pcidev->dev, "Version info: driver %s, firmware %s\n",
+		 drv_ver, fw_ver.mgmt_ver);
+
+	hinic_ignore_minor_version(fw_ver.mgmt_ver);
+	hinic_ignore_minor_version(drv_ver);
+	ver_mismatch = hinic_version_cmp(drv_ver, fw_ver.mgmt_ver);
+	if (!ver_mismatch)
+		return 0;
+
+	num_entry = (int)sizeof(ver_incompat_table) /
+		    (int)sizeof(ver_incompat_table[0]);
+	/* walk the table newest-first to find the newest entry not newer
+	 * than the driver
+	 */
+	for (idx = num_entry - 1; idx >= 0; idx--) {
+		ver_incompat = &ver_incompat_table[idx];
+
+		if (hinic_version_cmp(drv_ver, ver_incompat->version) < 0)
+			continue;
+
+		/* Found older version of driver in table */
+		return __version_mismatch(pcidev, fw_ver.mgmt_ver, drv_ver,
+					  ver_incompat, idx);
+	}
+
+	return 0;
+}
+
+/* Response header for MCTP (management component transport) host-info
+ * queries; multi-byte fields are sent big-endian (see __mctp_set_hdr()).
+ */
+struct mctp_hdr {
+	u16 resp_code;
+	u16 reason_code;
+	u32 manufacture_id;
+
+	u8 cmd_rsvd;
+	u8 major_cmd;
+	u8 sub_cmd;
+	u8 spc_field;
+};
+
+/* MCTP reply carrying this function's PCI bus/device/function address */
+struct mctp_bdf_info {
+	struct mctp_hdr hdr; /* spc_field: pf index */
+	u8 rsvd;
+	u8 bus;
+	u8 device;
+	u8 function;
+};
+
+enum mctp_resp_code {
+	/* COMMAND_COMPLETED = 0, */
+	/* COMMAND_FAILED = 1, */
+	/* COMMAND_UNAVALILABLE = 2, */
+	COMMAND_UNSUPPORTED = 3,
+};
+
+/* Fill the common MCTP reply header and convert multi-byte fields to
+ * big-endian wire order.
+ * NOTE(review): resp_code/reason_code are byte-swapped in place from their
+ * current contents — callers are expected to have set (or zeroed) them
+ * before calling; confirm every caller pre-initializes the buffer.
+ */
+static void __mctp_set_hdr(struct mctp_hdr *hdr,
+			   struct hinic_mctp_host_info *mctp_info)
+{
+	u32 manufacture_id = 0x07DB;	/* Huawei IANA enterprise number — TODO confirm */
+
+	hdr->cmd_rsvd = 0;
+	hdr->major_cmd = mctp_info->major_cmd;
+	hdr->sub_cmd = mctp_info->sub_cmd;
+	hdr->manufacture_id = cpu_to_be32(manufacture_id);
+	hdr->resp_code = cpu_to_be16(hdr->resp_code);
+	hdr->reason_code = cpu_to_be16(hdr->reason_code);
+}
+
+/* Answer the MCTP BDF query: report this function's PCI bus/device/function
+ * and its global PF index (in hdr.spc_field).
+ */
+static void __mctp_get_bdf(struct hinic_pcidev *pci_adapter,
+			   struct hinic_mctp_host_info *mctp_info)
+{
+	struct pci_dev *pdev = pci_adapter->pcidev;
+	struct mctp_bdf_info *bdf_info = mctp_info->data;
+
+	bdf_info->bus = pdev->bus->number;
+	bdf_info->device = (u8)(pdev->devfn >> 3);  /* 5bits in devfn */
+	bdf_info->function = (u8)(pdev->devfn & 0x7); /* 3bits in devfn */
+
+	memset(&bdf_info->hdr, 0, sizeof(bdf_info->hdr));
+	__mctp_set_hdr(&bdf_info->hdr, mctp_info);
+	bdf_info->hdr.spc_field =
+		(u8)hinic_global_func_id_hw(pci_adapter->hwdev);
+
+	mctp_info->data_len = sizeof(*bdf_info);
+}
+
+#define MCTP_MAJOR_CMD_PUBLIC 0x0
+#define MCTP_MAJOR_CMD_NIC 0x1
+
+#define MCTP_PUBLIC_SUB_CMD_BDF 0x1
+#define MCTP_PUBLIC_SUB_CMD_DRV 0x4
+
+#define MCTP_NIC_SUB_CMD_IP 0x1
+
+/* Dispatch an MCTP host-info request by (major_cmd, sub_cmd); unsupported
+ * commands get a header-only reply with reason COMMAND_UNSUPPORTED.
+ */
+static void __mctp_get_host_info(struct hinic_pcidev *dev,
+				 struct hinic_mctp_host_info *mctp_info)
+{
+	struct mctp_hdr *hdr;
+
+	/* key = major command in the high byte, sub command in the low byte */
+	switch ((((u16)mctp_info->major_cmd) << 8) | mctp_info->sub_cmd) {
+	case (MCTP_MAJOR_CMD_PUBLIC << 8 | MCTP_PUBLIC_SUB_CMD_BDF):
+		__mctp_get_bdf(dev, mctp_info);
+		break;
+
+	default:
+		hdr = mctp_info->data;
+		hdr->reason_code = COMMAND_UNSUPPORTED;
+		__mctp_set_hdr(hdr, mctp_info);
+		mctp_info->data_len = sizeof(*hdr);
+		break;
+	}
+}
+
+/* True when @dev belongs to the card named @ifname, has progressed far
+ * enough through init, and (unless @type is TYPE_UNKNOWN) is of the
+ * requested function type.
+ */
+static bool __is_pcidev_match_chip_name(const char *ifname,
+					struct hinic_pcidev *dev,
+					struct card_node *chip_node,
+					enum func_type type)
+{
+	if (strncmp(chip_node->chip_name, ifname, IFNAMSIZ))
+		return false;
+
+	if (dev->init_state < HINIC_INIT_STATE_HW_PART_INITED)
+		return false;
+
+	if (type != TYPE_UNKNOWN && hinic_func_type(dev->hwdev) != type)
+		return false;
+
+	return true;
+}
+
+/* Find the first function of @type on the card named @ifname, skipping
+ * functions being removed.  Returns NULL when no match exists.
+ * NOTE(review): the returned pointer is used after lld_dev_put(), i.e.
+ * outside the hold — lifetime relies on the caller's context; confirm.
+ */
+static struct hinic_pcidev *_get_pcidev_by_chip_name(char *ifname,
+						     enum func_type type)
+{
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+				continue;
+
+			if (__is_pcidev_match_chip_name(ifname, dev, chip_node,
+							type)) {
+				lld_dev_put();
+				return dev;
+			}
+		}
+	}
+
+	lld_dev_put();
+
+	return NULL;
+}
+
+/* Pick the best function on the card named @ifname, preferring (in order)
+ * a hw-init-stage PPF, then the most-initialized of PPF / PF, then any VF.
+ */
+static struct hinic_pcidev *hinic_get_pcidev_by_chip_name(char *ifname)
+{
+	struct hinic_pcidev *dev, *dev_hw_init;
+
+	/* find hw init device first */
+	dev_hw_init = _get_pcidev_by_chip_name(ifname, TYPE_UNKNOWN);
+	if (dev_hw_init) {
+		if (hinic_func_type(dev_hw_init->hwdev) == TYPE_PPF)
+			return dev_hw_init;
+	}
+
+	dev = _get_pcidev_by_chip_name(ifname, TYPE_PPF);
+	if (dev) {
+		if (dev_hw_init && dev_hw_init->init_state >= dev->init_state)
+			return dev_hw_init;
+
+		return dev;
+	}
+
+	dev = _get_pcidev_by_chip_name(ifname, TYPE_PF);
+	if (dev) {
+		if (dev_hw_init && dev_hw_init->init_state >= dev->init_state)
+			return dev_hw_init;
+
+		return dev;
+	}
+
+	dev = _get_pcidev_by_chip_name(ifname, TYPE_VF);
+	if (dev)
+		return dev;
+
+	return NULL;
+}
+
+/* True when @ifname names one of @dev's uld service objects (a single
+ * service when @type is specific, all non-NIC services when SERVICE_T_MAX)
+ * or @dev's NIC netdev.
+ */
+static bool __is_pcidev_match_dev_name(const char *ifname,
+				       struct hinic_pcidev *dev,
+				       enum hinic_service_type type)
+{
+	struct hinic_nic_dev *nic_dev;
+	enum hinic_service_type i;
+
+	if (type == SERVICE_T_MAX) {
+		/* wildcard: scan every service slot from SERVICE_T_OVS up */
+		for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) {
+			if (!strncmp(dev->uld_dev_name[i], ifname, IFNAMSIZ))
+				return true;
+		}
+	} else {
+		if (!strncmp(dev->uld_dev_name[type], ifname, IFNAMSIZ))
+			return true;
+	}
+
+	/* fall back to matching the NIC net device's name, if present */
+	nic_dev = dev->uld_dev[SERVICE_T_NIC];
+	if (nic_dev) {
+		if (!strncmp(nic_dev->netdev->name, ifname, IFNAMSIZ))
+			return true;
+	}
+
+	return false;
+}
+
+/* Find the function whose service-object or netdev name equals @ifname
+ * (see __is_pcidev_match_dev_name()); NULL when nothing matches.
+ */
+static struct hinic_pcidev *
+	hinic_get_pcidev_by_dev_name(char *ifname, enum hinic_service_type type)
+{
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+				continue;
+
+			if (__is_pcidev_match_dev_name(ifname, dev, type)) {
+				lld_dev_put();
+				return dev;
+			}
+		}
+	}
+	lld_dev_put();
+
+	return NULL;
+}
+
+/* Resolve @ifname — chip name, netdev name, or FC device name — to its
+ * adapter context; chip-name lookup takes precedence.
+ */
+static struct hinic_pcidev *hinic_get_pcidev_by_ifname(char *ifname)
+{
+	struct hinic_pcidev *dev;
+
+	/* support search hwdev by chip name, net device name,
+	 * or fc device name
+	 */
+	/* Find pcidev by chip_name first */
+	dev = hinic_get_pcidev_by_chip_name(ifname);
+	if (dev)
+		return dev;
+
+	/* If ifname not a chip name,
+	 * find pcidev by FC name or netdevice name
+	 */
+	return hinic_get_pcidev_by_dev_name(ifname, SERVICE_T_MAX);
+}
+
+/* Copy the chip name of the card owning @hwdev into @ifname (caller buffer
+ * must hold at least IFNAMSIZ bytes).
+ * Returns 0 on success, -EINVAL on NULL arguments, -ENXIO if @hwdev is not
+ * found on any card.
+ */
+int hinic_get_chip_name_by_hwdev(void *hwdev, char *ifname)
+{
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	if (!hwdev || !ifname)
+		return -EINVAL;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+				continue;
+
+			if (dev->hwdev == hwdev) {
+				/* strscpy() always NUL-terminates; passing
+				 * the full IFNAMSIZ (instead of IFNAMSIZ - 1
+				 * plus a manual terminator) avoids truncating
+				 * maximum-length names by one byte
+				 */
+				strscpy(ifname, chip_node->chip_name,
+					IFNAMSIZ);
+				lld_dev_put();
+				return 0;
+			}
+		}
+	}
+	lld_dev_put();
+
+	return -ENXIO;
+}
+EXPORT_SYMBOL(hinic_get_chip_name_by_hwdev);
+
+/* Return the card (chip node) that owns @hwdev, or NULL if not found or
+ * @hwdev is NULL.  Functions flagged for removal are ignored.
+ */
+static struct card_node *hinic_get_chip_node_by_hwdev(const void *hwdev)
+{
+	struct card_node *chip_node = NULL;
+	struct card_node *node_tmp = NULL;
+	struct hinic_pcidev *dev;
+
+	if (!hwdev)
+		return NULL;
+
+	lld_dev_hold();
+	list_for_each_entry(node_tmp, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &node_tmp->func_list, node) {
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+				continue;
+
+			if (dev->hwdev == hwdev) {
+				chip_node = node_tmp;
+				break;
+			}
+		}
+
+		/* stop scanning once found instead of walking the remaining
+		 * cards (the old code only skipped their inner loops)
+		 */
+		if (chip_node)
+			break;
+	}
+
+	lld_dev_put();
+
+	return chip_node;
+}
+
+/* Collect the NIC service objects of every fully-initialized PF on the
+ * same card as @pdev into @array; *dev_cnt receives the count.
+ * Returns -EINVAL if @pdev has no adapter or the device lacks NIC support.
+ * NOTE(review): @array bounds are not checked here — caller must size it
+ * for the card's maximum PF count; confirm at call sites.
+ */
+int hinic_get_pf_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[])
+{
+	struct hinic_pcidev *dev = pci_get_drvdata(pdev);
+	struct card_node *chip_node;
+	u32 cnt;
+
+	if (!dev || !hinic_support_nic(dev->hwdev, NULL))
+		return -EINVAL;
+
+	lld_dev_hold();
+
+	cnt = 0;
+	chip_node = dev->chip_node;
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+			continue;
+
+		if (dev->init_state < HINIC_INIT_STATE_NIC_INITED)
+			continue;
+
+		if (HINIC_FUNC_IS_VF(dev->hwdev))
+			continue;
+
+		array[cnt] = dev->uld_dev[SERVICE_T_NIC];
+		cnt++;
+	}
+	lld_dev_put();
+
+	*dev_cnt = cnt;
+
+	return 0;
+}
+
+/* Read the card-wide CoS-to-UP mapping shared by all functions on @pdev's
+ * card.  *is_setted reports whether the map was ever configured; @cos_up is
+ * only written when it was.  Returns -EINVAL if @pdev has no adapter.
+ */
+int hinic_get_chip_cos_up_map(struct pci_dev *pdev, bool *is_setted, u8 *cos_up)
+{
+	struct hinic_pcidev *dev = pci_get_drvdata(pdev);
+	struct card_node *chip_node;
+
+	if (!dev)
+		return -EINVAL;
+
+	chip_node = dev->chip_node;
+	*is_setted = chip_node->cos_up_setted;
+	if (chip_node->cos_up_setted)
+		memcpy(cos_up, chip_node->cos_up, sizeof(chip_node->cos_up));
+
+	return 0;
+}
+
+/* Store the card-wide CoS-to-UP mapping for @pdev's card and mark it set.
+ * Returns -EINVAL if @pdev has no adapter.
+ */
+int hinic_set_chip_cos_up_map(struct pci_dev *pdev, u8 *cos_up)
+{
+	struct hinic_pcidev *dev = pci_get_drvdata(pdev);
+	struct card_node *chip_node;
+
+	if (!dev)
+		return -EINVAL;
+
+	chip_node = dev->chip_node;
+	chip_node->cos_up_setted = true;
+	memcpy(chip_node->cos_up, cos_up, sizeof(chip_node->cos_up));
+
+	return 0;
+}
+
+/* Resolve a chip/net/FC interface name to its hwdev handle; NULL if unknown. */
+void *hinic_get_hwdev_by_ifname(char *ifname)
+{
+	struct hinic_pcidev *pci_adapter = hinic_get_pcidev_by_ifname(ifname);
+
+	return pci_adapter ? pci_adapter->hwdev : NULL;
+}
+
+/* Look up the service object of @type by device name; NULL on bad type or
+ * no match.
+ */
+void *hinic_get_uld_dev_by_ifname(char *ifname, enum hinic_service_type type)
+{
+	struct hinic_pcidev *pci_adapter;
+
+	if (type >= SERVICE_T_MAX) {
+		pr_err("Service type: %d is error\n", type);
+		return NULL;
+	}
+
+	pci_adapter = hinic_get_pcidev_by_dev_name(ifname, type);
+
+	return pci_adapter ? pci_adapter->uld_dev[type] : NULL;
+}
+
+/* Return the service object of @type on the card named @ifname, or NULL.
+ * NOTE(review): unlike hinic_get_uld_dev_by_ifname(), @type is not
+ * range-checked here — callers must pass a valid service type.
+ */
+void *hinic_get_uld_by_chip_name(char *ifname, enum hinic_service_type type)
+{
+	struct hinic_pcidev *dev;
+
+	/* support search hwdev by chip name, net device name,
+	 * or fc device name, Find pcidev by chip_name first
+	 */
+	dev = hinic_get_pcidev_by_chip_name(ifname);
+	if (dev)
+		return dev->uld_dev[type];
+
+	return NULL;
+}
+
+/* NOTICE: nictool can't use this function, because this function can't keep
+ * tool context mutual exclusive with remove context
+ */
+/* Return the PPF's service object of @type on the same card as @pdev,
+ * or NULL.  Intentionally does NOT skip functions in removal (see comment
+ * below), so it must not be used from nictool contexts.
+ */
+void *hinic_get_ppf_uld_by_pdev(struct pci_dev *pdev,
+				enum hinic_service_type type)
+{
+	struct hinic_pcidev *pci_adapter;
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	if (!pdev)
+		return NULL;
+
+	pci_adapter = pci_get_drvdata(pdev);
+	if (!pci_adapter)
+		return NULL;
+
+	chip_node = pci_adapter->chip_node;
+	lld_dev_hold();
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		/* can't test HINIC_FUNC_IN_REMOVE bit in dev->flag, because
+		 * TOE will call this function when detach toe driver
+		 */
+
+		if (hinic_func_type(dev->hwdev) == TYPE_PPF) {
+			lld_dev_put();
+			return dev->uld_dev[type];
+		}
+	}
+	lld_dev_put();
+
+	return NULL;
+}
+EXPORT_SYMBOL(hinic_get_ppf_uld_by_pdev);
+
+/* Return the hwdev handle of the PPF on the same card as @pdev, or NULL.
+ * Skips functions being removed or not yet past hw-interface init.
+ */
+void *hinic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter;
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	if (!pdev)
+		return NULL;
+
+	pci_adapter = pci_get_drvdata(pdev);
+	if (!pci_adapter)
+		return NULL;
+
+	chip_node = pci_adapter->chip_node;
+	lld_dev_hold();
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) ||
+		    dev->init_state < HINIC_INIT_STATE_HW_IF_INITED)
+			continue;
+
+		if (dev->hwdev && hinic_func_type(dev->hwdev) == TYPE_PPF) {
+			lld_dev_put();
+			return dev->hwdev;
+		}
+	}
+	lld_dev_put();
+
+	return NULL;
+}
+
+/* Collect the numeric ids of all probed cards into @id_info (a struct
+ * nic_card_id): ids are parsed from each card's "hinicN" chip name and
+ * card_id->num receives the count.
+ */
+void hinic_get_all_chip_id(void *id_info)
+{
+	struct nic_card_id *card_id = (struct nic_card_id *)id_info;
+	struct card_node *chip_node;
+	int i = 0;
+	int id, err;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		/* reset before each parse: a failed sscanf must not leak an
+		 * uninitialized or stale id into the output array
+		 */
+		id = 0;
+		err = sscanf(chip_node->chip_name, HINIC_CHIP_NAME "%d", &id);
+		if (err <= 0)
+			pr_err("Failed to get hinic id\n");
+
+		card_id->id[i] = id;
+		i++;
+	}
+	lld_dev_put();
+	card_id->num = i;
+}
+
+/* A function is usable for card-level queries when it is not being removed,
+ * its hwdev has finished initializing, and it is not a VF.
+ */
+static bool __is_func_valid(struct hinic_pcidev *dev)
+{
+	return !test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) &&
+	       dev->init_state >= HINIC_INIT_STATE_HWDEV_INITED &&
+	       !HINIC_FUNC_IS_VF(dev->hwdev);
+}
+
+/* True when @offset equals the BAR0 physical start address of any non-VF
+ * function that is not being removed — used to validate tool-supplied
+ * physical addresses before mapping.
+ */
+bool hinic_is_valid_bar_addr(u64 offset)
+{
+	struct card_node *chip_node = NULL;
+	struct hinic_pcidev *dev;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			if (hinic_func_type(dev->hwdev) == TYPE_VF)
+				continue;
+
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+				continue;
+
+			if (offset == pci_resource_start(dev->pcidev, 0)) {
+				lld_dev_put();
+				return true;
+			}
+		}
+	}
+	lld_dev_put();
+
+	return false;
+}
+
+/* Fill @bufin (a struct card_info) with per-PF service information for the
+ * card owning @hwdev: service-type bitmap, service/netdev name and PCI bus
+ * info for each valid PF.  Silently does nothing if the card is not found.
+ * NOTE(review): the index into info->pf[] grows with each valid PF and is
+ * not bounds-checked here — presumably card_info sizes pf[] for the maximum
+ * PF count; confirm against the struct definition.
+ */
+void hinic_get_card_info(void *hwdev, void *bufin)
+{
+	struct card_node *chip_node = NULL;
+	struct card_info *info = (struct card_info *)bufin;
+	struct hinic_nic_dev *nic_dev;
+	struct hinic_pcidev *dev;
+	void *fun_hwdev;
+	u32 i = 0;
+
+	info->pf_num = 0;
+
+	chip_node = hinic_get_chip_node_by_hwdev(hwdev);
+	if (!chip_node)
+		return;
+
+	lld_dev_hold();
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		if (!__is_func_valid(dev))
+			continue;
+
+		fun_hwdev = dev->hwdev;
+
+		if (((hinic_support_fc(fun_hwdev, NULL)) ||
+		     (hinic_support_fcoe(fun_hwdev, NULL))) &&
+		    dev->uld_dev[SERVICE_T_FC]) {
+			info->pf[i].pf_type |= (u32)BIT(SERVICE_T_FC);
+			strscpy(info->pf[i].name,
+				dev->uld_dev_name[SERVICE_T_FC], IFNAMSIZ);
+		}
+
+		if (hinic_support_nic(fun_hwdev, NULL)) {
+			nic_dev = dev->uld_dev[SERVICE_T_NIC];
+			if (nic_dev) {
+				info->pf[i].pf_type |= (u32)BIT(SERVICE_T_NIC);
+				strscpy(info->pf[i].name,
+					nic_dev->netdev->name, IFNAMSIZ);
+			}
+		}
+
+		if ((hinic_support_ovs(fun_hwdev, NULL)) &&
+		    dev->uld_dev[SERVICE_T_OVS])
+			info->pf[i].pf_type |= (u32)BIT(SERVICE_T_OVS);
+
+		if ((hinic_support_roce(fun_hwdev, NULL)) &&
+		    dev->uld_dev[SERVICE_T_ROCE])
+			info->pf[i].pf_type |= (u32)BIT(SERVICE_T_ROCE);
+
+		if ((hinic_support_toe(fun_hwdev, NULL)) &&
+		    dev->uld_dev[SERVICE_T_TOE])
+			info->pf[i].pf_type |= (u32)BIT(SERVICE_T_TOE);
+
+		if (hinic_func_for_mgmt(fun_hwdev))
+			strscpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ);
+
+		if (hinic_func_for_pt(fun_hwdev))
+			info->pf[i].pf_type |= (u32)BIT(SERVICE_T_PT);
+
+		if (hinic_func_for_hwpt(fun_hwdev))
+			info->pf[i].pf_type |= (u32)BIT(SERVICE_T_HWPT);
+
+		strscpy(info->pf[i].bus_info, pci_name(dev->pcidev),
+			sizeof(info->pf[i].bus_info));
+		info->pf_num++;
+		/* advance the write index to the next free pf[] slot */
+		i = info->pf_num;
+	}
+	lld_dev_put();
+}
+
+/* Fill @card_func with the BAR0 size and physical address of every PF on
+ * the card named @chip_name, capped at MAX_SIZE entries.
+ */
+void hinic_get_card_func_info_by_card_name(const char *chip_name,
+					   struct hinic_card_func_info
+					   *card_func)
+{
+	struct card_node *chip_node = NULL;
+	struct hinic_pcidev *dev;
+	struct func_pdev_info *pdev_info;
+
+	card_func->num_pf = 0;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ))
+			continue;
+
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			if (hinic_func_type(dev->hwdev) == TYPE_VF)
+				continue;
+
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+				continue;
+
+			pdev_info = &card_func->pdev_info[card_func->num_pf];
+			pdev_info->bar0_size = pci_resource_len(dev->pcidev, 0);
+			pdev_info->bar0_phy_addr =
+					pci_resource_start(dev->pcidev, 0);
+
+			card_func->num_pf++;
+			if (card_func->num_pf >= MAX_SIZE)
+				break;
+		}
+	}
+
+	lld_dev_put();
+}
+
+/* Report the PCI device ID of the first Huawei function on the same chip
+ * as @hwdev. Returns 0 with *dev_id set (0 when no match was found), or
+ * -ENODEV when the hwdev has no chip node.
+ */
+int hinic_get_device_id(void *hwdev, u16 *dev_id)
+{
+	struct card_node *chip_node = NULL;
+	struct hinic_pcidev *dev;
+	u16 vendor_id = 0;
+	u16 device_id = 0;
+
+	chip_node = hinic_get_chip_node_by_hwdev(hwdev);
+	if (!chip_node)
+		return -ENODEV;
+
+	lld_dev_hold();
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+			continue;
+
+		/* use the standard config-space register offsets instead of
+		 * magic numbers 0 and 2
+		 */
+		pci_read_config_word(dev->pcidev, PCI_VENDOR_ID, &vendor_id);
+		if (vendor_id == HINIC_PCI_VENDOR_ID) {
+			pci_read_config_word(dev->pcidev, PCI_DEVICE_ID,
+					     &device_id);
+			break;
+		}
+	}
+	lld_dev_put();
+	*dev_id = device_id;
+
+	return 0;
+}
+
+/* Translate a physical port id into a global PF function id on the same
+ * chip as @hwdev. On a match, *pf_id and *isvalid are set; when no
+ * function matches, the outputs are left untouched, so the caller is
+ * expected to pre-initialize *isvalid (assumption — TODO confirm callers).
+ * Always returns 0 unless the chip node lookup fails (-ENODEV).
+ */
+int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid)
+{
+	struct card_node *chip_node = NULL;
+	struct hinic_pcidev *dev;
+
+	chip_node = hinic_get_chip_node_by_hwdev(hwdev);
+	if (!chip_node)
+		return -ENODEV;
+
+	lld_dev_hold();
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) ||
+		    dev->init_state < HINIC_INIT_STATE_HWDEV_INITED)
+			continue;
+
+		if (hinic_physical_port_id(dev->hwdev) == port_id) {
+			*pf_id = hinic_global_func_id(dev->hwdev);
+			*isvalid = 1;
+			break;
+		}
+	}
+	lld_dev_put();
+
+	return 0;
+}
+
+/* Copy the name of the first FC upper-layer device found on any chip into
+ * @devname (IFNAMSIZ bytes). Only fully NIC-initialized PFs are
+ * considered; @devname is left unchanged when no FC device exists.
+ */
+void hinic_get_fc_devname(char *devname)
+{
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+				continue;
+
+			if (dev->init_state < HINIC_INIT_STATE_NIC_INITED)
+				continue;
+
+			if (HINIC_FUNC_IS_VF(dev->hwdev))
+				continue;
+
+			if (dev->uld_dev[SERVICE_T_FC]) {
+				strscpy(devname,
+					dev->uld_dev_name[SERVICE_T_FC],
+					IFNAMSIZ);
+				/* drop the hold before the early return */
+				lld_dev_put();
+				return;
+			}
+		}
+	}
+	lld_dev_put();
+}
+
+/* Report the probe progress recorded for @pdev; HINIC_INIT_STATE_NONE is
+ * returned when no driver data is attached to the device yet.
+ */
+enum hinic_init_state hinic_get_init_state(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *adapter = pci_get_drvdata(pdev);
+
+	return adapter ? adapter->init_state : HINIC_INIT_STATE_NONE;
+}
+
+/* Look up a device by interface name and report its init state;
+ * HINIC_INIT_STATE_NONE is returned when the name matches no device.
+ */
+enum hinic_init_state hinic_get_init_state_by_ifname(char *ifname)
+{
+	struct hinic_pcidev *adapter = hinic_get_pcidev_by_ifname(ifname);
+
+	if (!adapter) {
+		pr_err("Can not get device %s\n", ifname);
+		return HINIC_INIT_STATE_NONE;
+	}
+
+	return adapter->init_state;
+}
+
+/* Read the firmware self-test result word from the config register BAR
+ * of the device named @ifname. The register is stored big-endian, hence
+ * the be32_to_cpu() conversion. Returns -EFAULT when no device matches.
+ */
+int hinic_get_self_test_result(char *ifname, u32 *result)
+{
+	struct hinic_pcidev *dev = NULL;
+
+	dev = hinic_get_pcidev_by_ifname(ifname);
+	if (!dev) {
+		pr_err("Get pcidev failed by ifname: %s\n", ifname);
+		return -EFAULT;
+	}
+
+	*result = be32_to_cpu(readl((u8 __iomem *)(dev->cfg_reg_base) +
+				SELF_TEST_BAR_ADDR_OFFSET));
+	return 0;
+}
+
+/* Return the net_device attached to @lld_dev, or NULL when the function
+ * has no NIC capability or no NIC upper-layer device is bound.
+ */
+struct net_device *hinic_get_netdev_by_lld(struct hinic_lld_dev *lld_dev)
+{
+	struct hinic_pcidev *pci_adapter;
+	struct hinic_nic_dev *nic_dev;
+
+	if (!lld_dev || !hinic_support_nic(lld_dev->hwdev, NULL))
+		return NULL;
+
+	pci_adapter = pci_get_drvdata(lld_dev->pdev);
+	nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC];
+	if (!nic_dev) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"There's no net device attached on the pci device\n");
+		return NULL;
+	}
+
+	return nic_dev->netdev;
+}
+EXPORT_SYMBOL(hinic_get_netdev_by_lld);
+
+/* Return the low-level hwdev handle of @netdev's private nic_dev, or NULL
+ * when either pointer is unavailable.
+ */
+void *hinic_get_hwdev_by_netdev(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev;
+
+	/* validate @netdev before dereferencing it through netdev_priv();
+	 * the original checked it only after the dereference
+	 */
+	if (!netdev)
+		return NULL;
+
+	nic_dev = netdev_priv(netdev);
+	if (!nic_dev)
+		return NULL;
+
+	return nic_dev->hwdev;
+}
+EXPORT_SYMBOL(hinic_get_hwdev_by_netdev);
+
+/* Return the net_device bound to @pdev, or NULL when the device has no
+ * driver data, no NIC capability, or no NIC upper-layer device attached.
+ */
+struct net_device *hinic_get_netdev_by_pcidev(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter;
+	struct hinic_nic_dev *nic_dev;
+
+	if (!pdev)
+		return NULL;
+
+	pci_adapter = pci_get_drvdata(pdev);
+	if (!pci_adapter || !hinic_support_nic(pci_adapter->hwdev, NULL))
+		return NULL;
+
+	nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC];
+	if (!nic_dev) {
+		/* fix stray backquote; matches the wording used by
+		 * hinic_get_netdev_by_lld()
+		 */
+		sdk_err(&pci_adapter->pcidev->dev,
+			"There's no net device attached on the pci device\n");
+		return NULL;
+	}
+
+	return nic_dev->netdev;
+}
+EXPORT_SYMBOL(hinic_get_netdev_by_pcidev);
+
+/* Return a pointer to the SR-IOV bookkeeping embedded in @pdev's adapter.
+ * NOTE(review): assumes drvdata is non-NULL, i.e. callers only invoke this
+ * after a successful probe — TODO confirm.
+ */
+struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+	return &pci_adapter->sriov_info;
+}
+
+/* Return true when at least one non-VF function past PCI init exists on
+ * any chip, i.e. the driver is running on the host side.
+ */
+bool hinic_is_in_host(void)
+{
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+				continue;
+
+			if (dev->init_state > HINIC_INIT_STATE_PCI_INITED &&
+			    hinic_func_type(dev->hwdev) != TYPE_VF) {
+				/* drop the hold before the early return */
+				lld_dev_put();
+				return true;
+			}
+		}
+	}
+	lld_dev_put();
+
+	return false;
+}
+
+/* Bind the registered NIC upper-layer driver to the function that owns
+ * @lld_dev. Returns -EINVAL for a NULL handle, else attach_uld()'s result.
+ */
+int hinic_attach_nic(struct hinic_lld_dev *lld_dev)
+{
+	struct hinic_pcidev *adapter;
+
+	if (!lld_dev)
+		return -EINVAL;
+
+	adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+
+	return attach_uld(adapter, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]);
+}
+EXPORT_SYMBOL(hinic_attach_nic);
+
+/* Unbind the NIC upper-layer driver from the function that owns @lld_dev.
+ * A NULL handle is ignored.
+ */
+void hinic_detach_nic(struct hinic_lld_dev *lld_dev)
+{
+	struct hinic_pcidev *adapter;
+
+	if (!lld_dev)
+		return;
+
+	adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+	detach_uld(adapter, SERVICE_T_NIC);
+}
+EXPORT_SYMBOL(hinic_detach_nic);
+
+/* Bind the registered RoCE upper-layer driver to the function that owns
+ * @lld_dev. Returns -EINVAL for a NULL handle, else attach_uld()'s result.
+ */
+int hinic_attach_roce(struct hinic_lld_dev *lld_dev)
+{
+	struct hinic_pcidev *adapter;
+
+	if (!lld_dev)
+		return -EINVAL;
+
+	adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+
+	return attach_uld(adapter, SERVICE_T_ROCE, &g_uld_info[SERVICE_T_ROCE]);
+}
+EXPORT_SYMBOL(hinic_attach_roce);
+
+/* Unbind the RoCE upper-layer driver from the function that owns
+ * @lld_dev. A NULL handle is ignored.
+ */
+void hinic_detach_roce(struct hinic_lld_dev *lld_dev)
+{
+	struct hinic_pcidev *adapter;
+
+	if (!lld_dev)
+		return;
+
+	adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+	detach_uld(adapter, SERVICE_T_ROCE);
+}
+EXPORT_SYMBOL(hinic_detach_roce);
+
+/* Enable or disable RSS on the NIC upper-layer device of @dev. No-op
+ * (returning 0) for functions in removal, without NIC support, without an
+ * attached NIC uld, or for VFs.
+ */
+static int __set_nic_rss_state(struct hinic_pcidev *dev, bool enable)
+{
+	void *nic_uld;
+	int err = 0;
+
+	if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag))
+		return 0;
+
+	nic_uld = dev->uld_dev[SERVICE_T_NIC];
+	if (!hinic_support_nic(dev->hwdev, NULL) || !nic_uld)
+		return 0;
+
+	/* RSS is a PF-only control */
+	if (hinic_func_type(dev->hwdev) == TYPE_VF)
+		return 0;
+
+	if (enable)
+		err = hinic_enable_func_rss(nic_uld);
+	else
+		err = hinic_disable_func_rss(nic_uld);
+	if (err) {
+		sdk_err(&dev->pcidev->dev, "Failed to %s rss\n",
+			enable ? "enable" : "disable");
+	}
+
+	return err;
+}
+
+/* Exported helper: turn RSS off for the function owning @lld_dev. */
+int hinic_disable_nic_rss(struct hinic_lld_dev *lld_dev)
+{
+	struct hinic_pcidev *pci_adapter;
+
+	if (!lld_dev)
+		return -EINVAL;
+
+	pci_adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+	return __set_nic_rss_state(pci_adapter, false);
+}
+EXPORT_SYMBOL(hinic_disable_nic_rss);
+
+/* Exported helper: turn RSS on for the function owning @lld_dev. */
+int hinic_enable_nic_rss(struct hinic_lld_dev *lld_dev)
+{
+	struct hinic_pcidev *pci_adapter;
+
+	if (!lld_dev)
+		return -EINVAL;
+
+	pci_adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev);
+	return __set_nic_rss_state(pci_adapter, true);
+}
+EXPORT_SYMBOL(hinic_enable_nic_rss);
+
+/* Return the pci_device_id stored in @pdev's adapter, or NULL for a NULL
+ * pdev. NOTE(review): assumes drvdata is non-NULL once pdev is valid —
+ * TODO confirm callers only use this after probe.
+ */
+struct pci_device_id *hinic_get_pci_device_id(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *adapter;
+
+	if (!pdev)
+		return NULL;
+
+	adapter = pci_get_drvdata(pdev);
+
+	return &adapter->id;
+}
+EXPORT_SYMBOL(hinic_get_pci_device_id);
+
+/* Query the firmware-side nic-enable flag for this function and attach or
+ * detach the NIC upper-layer driver accordingly, advancing init_state to
+ * NIC_INITED on a successful attach.
+ */
+static int __set_nic_func_state(struct hinic_pcidev *pci_adapter)
+{
+	struct pci_dev *pdev = pci_adapter->pcidev;
+	u16 func_id;
+	int err;
+	bool enable_nic;
+
+	err = hinic_global_func_id_get(pci_adapter->hwdev, &func_id);
+	if (err)
+		return err;
+
+	err = hinic_get_func_nic_enable(pci_adapter->hwdev, func_id,
+					&enable_nic);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to get nic state\n");
+		return err;
+	}
+
+	if (enable_nic) {
+		/* multi-host BM slave: refresh VF capabilities first */
+		if (is_multi_bm_slave(pci_adapter->hwdev))
+			hinic_set_vf_dev_cap(pci_adapter->hwdev);
+
+		err = attach_uld(pci_adapter, SERVICE_T_NIC,
+				 &g_uld_info[SERVICE_T_NIC]);
+		if (err) {
+			sdk_err(&pdev->dev, "Failed to initialize NIC\n");
+			return err;
+		}
+
+		/* never move init_state backwards */
+		if (pci_adapter->init_state < HINIC_INIT_STATE_NIC_INITED)
+			pci_adapter->init_state = HINIC_INIT_STATE_NIC_INITED;
+	} else {
+		detach_uld(pci_adapter, SERVICE_T_NIC);
+	}
+
+	return 0;
+}
+
+/* Enable (@en true) or disable the NIC service on the VF whose global
+ * function id is @vf_func_id, searching the chip's function list.
+ * Returns 0 on success, -EINVAL/-EFAULT on bad input or when the VF is
+ * not found, or the attach/cap-set error.
+ */
+int hinic_ovs_set_vf_nic_state(struct hinic_lld_dev *lld_dev, u16 vf_func_id,
+			       bool en)
+{
+	struct hinic_pcidev *dev, *des_dev;
+	struct hinic_nic_dev *uld_dev;
+	int err = -EFAULT;
+
+	if (!lld_dev)
+		return -EINVAL;
+
+	dev = pci_get_drvdata(lld_dev->pdev);
+
+	if (!dev)
+		return -EFAULT;
+	/* find func_idx pci_adapter and disable or enable nic */
+	lld_dev_hold();
+	list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
+		/* test the iterated entry's flag, not the caller's
+		 * (was: &dev->flag — see __multi_host_mgmt for the
+		 * intended pattern)
+		 */
+		if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag))
+			continue;
+
+		if (des_dev->init_state <
+		    HINIC_INIT_STATE_DBGTOOL_INITED &&
+		    !test_bit(HINIC_FUNC_PRB_ERR,
+			      &des_dev->flag))
+			continue;
+
+		if (hinic_global_func_id(des_dev->hwdev) != vf_func_id)
+			continue;
+
+		/* matched, but the VF never finished probing: give up */
+		if (des_dev->init_state <
+		    HINIC_INIT_STATE_DBGTOOL_INITED) {
+			break;
+		}
+
+		sdk_info(&dev->pcidev->dev, "Receive event: %s vf%d nic\n",
+			 en ? "enable" : "disable", vf_func_id);
+
+		err = 0;
+		if (en) {
+			if (des_dev->uld_dev[SERVICE_T_NIC]) {
+				sdk_err(&des_dev->pcidev->dev,
+					"%s driver has attached to pcie device, cannot set VF max_queue_num\n",
+					s_uld_name[SERVICE_T_NIC]);
+			} else {
+				err = hinic_set_vf_dev_cap(des_dev->hwdev);
+
+				if (err) {
+					sdk_err(&des_dev->pcidev->dev,
+						"%s driver Set VF max_queue_num failed, err=%d\n",
+						s_uld_name[SERVICE_T_NIC], err);
+
+					break;
+				}
+			}
+
+			err = attach_uld(des_dev, SERVICE_T_NIC,
+					 &g_uld_info[SERVICE_T_NIC]);
+			if (err) {
+				sdk_err(&des_dev->pcidev->dev, "Failed to initialize NIC\n");
+				break;
+			}
+
+			uld_dev = (struct hinic_nic_dev *)
+				(des_dev->uld_dev[SERVICE_T_NIC]);
+			uld_dev->in_vm = true;
+			uld_dev->is_vm_slave =
+				is_multi_vm_slave(uld_dev->hwdev);
+			uld_dev->is_bm_slave =
+				is_multi_bm_slave(uld_dev->hwdev);
+			if (des_dev->init_state < HINIC_INIT_STATE_NIC_INITED)
+				des_dev->init_state =
+					HINIC_INIT_STATE_NIC_INITED;
+		} else {
+			detach_uld(des_dev, SERVICE_T_NIC);
+		}
+
+		break;
+	}
+	lld_dev_put();
+
+	return err;
+}
+EXPORT_SYMBOL(hinic_ovs_set_vf_nic_state);
+
+/* Deferred worker: re-evaluate the NIC enable/disable state of the
+ * function that queued this work item.
+ */
+static void slave_host_mgmt_work(struct work_struct *work)
+{
+	struct hinic_pcidev *adapter;
+
+	adapter = container_of(work, struct hinic_pcidev, slave_nic_work);
+	__set_nic_func_state(adapter);
+}
+
+/* Dispatch a multi-host management event. For NIC state changes, locate
+ * the target function by hardware global id and schedule its slave NIC
+ * worker; a probe error on a not-yet-initialized target is reported via
+ * nic_state->status.
+ */
+static void __multi_host_mgmt(struct hinic_pcidev *dev,
+			      struct hinic_multi_host_mgmt_event *mhost_mgmt)
+{
+	struct hinic_pcidev *des_dev;
+	/* plain pointer: initialize with NULL, not a braced scalar
+	 * initializer ({0})
+	 */
+	struct hinic_mhost_nic_func_state *nic_state = NULL;
+
+	switch (mhost_mgmt->sub_cmd) {
+	case HINIC_MHOST_NIC_STATE_CHANGE:
+		nic_state = mhost_mgmt->data;
+
+		nic_state->status = 0;
+
+		/* find func_idx pci_adapter and disable or enable nic */
+		lld_dev_hold();
+		list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag))
+				continue;
+
+			if (des_dev->init_state <
+			    HINIC_INIT_STATE_DBGTOOL_INITED &&
+			    !test_bit(HINIC_FUNC_PRB_ERR,
+				      &des_dev->flag))
+				continue;
+
+			if (hinic_global_func_id_hw(des_dev->hwdev) !=
+			    nic_state->func_idx)
+				continue;
+
+			if (des_dev->init_state <
+			    HINIC_INIT_STATE_DBGTOOL_INITED) {
+				nic_state->status =
+					test_bit(HINIC_FUNC_PRB_ERR,
+						 &des_dev->flag) ? 1 : 0;
+				break;
+			}
+
+			sdk_info(&dev->pcidev->dev, "Receive nic state changed event, state: %d\n",
+				 nic_state->enable);
+
+			/* schedule_work */
+			schedule_work(&des_dev->slave_nic_work);
+
+			break;
+		}
+		lld_dev_put();
+
+		break;
+
+	default:
+		sdk_warn(&dev->pcidev->dev, "Received unknown multi-host mgmt event %d\n",
+			 mhost_mgmt->sub_cmd);
+		break;
+	}
+}
+
+/* Deliver @event to every upper-layer driver of @dev. The per-service
+ * state bit doubles as a "detach in progress" guard: if it is already
+ * set, the event is skipped for that service with a warning.
+ */
+static void send_uld_dev_event(struct hinic_pcidev *dev,
+			       struct hinic_event_info *event)
+{
+	enum hinic_service_type type;
+
+	for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) {
+		if (test_and_set_bit(type, &dev->state)) {
+			sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't handler, %s is in detach\n",
+				 event->type, s_uld_name[type]);
+			continue;
+		}
+
+		if (g_uld_info[type].event)
+			g_uld_info[type].event(&dev->lld_dev,
+					       dev->uld_dev[type], event);
+		clear_bit(type, &dev->state);
+	}
+}
+
+/* Broadcast @event to the upper-layer drivers of every initialized PF on
+ * the same chip as @dev; VFs and removing functions are skipped.
+ */
+static void send_event_to_all_pf(struct hinic_pcidev *dev,
+				 struct hinic_event_info *event)
+{
+	struct hinic_pcidev *des_dev = NULL;
+
+	lld_dev_hold();
+	list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
+		if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag) ||
+		    des_dev->init_state < HINIC_INIT_STATE_HW_IF_INITED)
+			continue;
+
+		if (hinic_func_type(des_dev->hwdev) == TYPE_VF)
+			continue;
+
+		send_uld_dev_event(des_dev, event);
+	}
+	lld_dev_put();
+}
+
+/* Deliver @event to the upper-layer drivers of the single PF with global
+ * function id @func_id on @dev's chip. Silently does nothing if no such
+ * PF is found.
+ */
+static void send_event_to_dst_pf(struct hinic_pcidev *dev, u16 func_id,
+				 struct hinic_event_info *event)
+{
+	struct hinic_pcidev *des_dev = NULL;
+
+	lld_dev_hold();
+	list_for_each_entry(des_dev, &dev->chip_node->func_list, node) {
+		if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag) ||
+		    des_dev->init_state < HINIC_INIT_STATE_HW_IF_INITED)
+			continue;
+
+		if (hinic_func_type(des_dev->hwdev) == TYPE_VF)
+			continue;
+
+		if (hinic_global_func_id(des_dev->hwdev) == func_id) {
+			send_uld_dev_event(des_dev, event);
+			break;
+		}
+	}
+	lld_dev_put();
+}
+
+/* Central hardware-event callback registered with the hwdev layer.
+ * Routes each event type to its handler; anything unrecognized is fanned
+ * out to this function's own upper-layer drivers.
+ */
+void hinic_event_process(void *adapter, struct hinic_event_info *event)
+{
+	struct hinic_pcidev *dev = adapter;
+	u16 func_id;
+
+	switch (event->type) {
+	case HINIC_EVENT_FMW_ACT_NTC:
+		hinic_sync_time_to_fmw(dev);
+		break;
+	case HINIC_EVENT_MCTP_GET_HOST_INFO:
+		__mctp_get_host_info(dev, &event->mctp_info);
+		break;
+	case HINIC_EVENT_MULTI_HOST_MGMT:
+		__multi_host_mgmt(dev, &event->mhost_mgmt);
+		break;
+	case HINIC_EVENT_FAULT:
+		/* serious FLR faults are targeted at the faulting PF;
+		 * everything else goes to the local ulds
+		 */
+		if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR &&
+		    event->info.event.chip.func_id < HINIC_MAX_PF_NUM) {
+			func_id = event->info.event.chip.func_id;
+			send_event_to_dst_pf(adapter, func_id, event);
+		} else {
+			send_uld_dev_event(adapter, event);
+		}
+		break;
+	case HINIC_EVENT_MGMT_WATCHDOG_EVENT:
+		send_event_to_all_pf(adapter, event);
+		break;
+	default:
+		send_uld_dev_event(adapter, event);
+		break;
+	}
+}
+
+/* Map the config, interrupt and doorbell BARs of @pdev, plus (in normal
+ * chip mode) the direct-WQE region that follows the doorbell space.
+ * Mappings are unwound in reverse on failure; returns 0 or -ENOMEM.
+ */
+static int mapping_bar(struct pci_dev *pdev, struct hinic_pcidev *pci_adapter)
+{
+	u32 db_dwqe_size;
+	u64 dwqe_addr;
+
+	pci_adapter->cfg_reg_base =
+		pci_ioremap_bar(pdev, HINIC_PCI_CFG_REG_BAR);
+	if (!pci_adapter->cfg_reg_base) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"Failed to map configuration regs\n");
+		return -ENOMEM;
+	}
+
+	pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
+						     HINIC_PCI_INTR_REG_BAR);
+	if (!pci_adapter->intr_reg_base) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"Failed to map interrupt regs\n");
+		goto map_intr_bar_err;
+	}
+
+	/* doorbell size also determines chip_mode as a side effect */
+	db_dwqe_size = hinic_get_db_size(pci_adapter->cfg_reg_base,
+					 &pci_adapter->chip_mode);
+
+	pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC_PCI_DB_BAR);
+	pci_adapter->db_base = ioremap(pci_adapter->db_base_phy,
+				       db_dwqe_size);
+	if (!pci_adapter->db_base) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"Failed to map doorbell regs\n");
+		goto map_db_err;
+	}
+
+	/* only normal mode exposes the direct WQE window */
+	if (pci_adapter->chip_mode != CHIP_MODE_NORMAL)
+		return 0;
+
+	dwqe_addr = pci_adapter->db_base_phy + db_dwqe_size;
+
+#if defined(__aarch64__)
+	/* arm do not support call ioremap_wc() */
+	pci_adapter->dwqe_mapping = ioremap(dwqe_addr, db_dwqe_size);
+#else
+	pci_adapter->dwqe_mapping = io_mapping_create_wc(dwqe_addr,
+							 db_dwqe_size);
+
+#endif
+	if (!pci_adapter->dwqe_mapping) {
+		sdk_err(&pci_adapter->pcidev->dev, "Failed to io_mapping_create_wc\n");
+		goto mapping_dwqe_err;
+	}
+
+	return 0;
+
+mapping_dwqe_err:
+	iounmap(pci_adapter->db_base);
+
+map_db_err:
+	iounmap(pci_adapter->intr_reg_base);
+
+map_intr_bar_err:
+	iounmap(pci_adapter->cfg_reg_base);
+
+	return -ENOMEM;
+}
+
+/* Undo mapping_bar(): release the dwqe mapping (normal mode only, using
+ * the arch-matched release call) and unmap the three BARs.
+ */
+static void unmapping_bar(struct hinic_pcidev *pci_adapter)
+{
+	if (pci_adapter->chip_mode == CHIP_MODE_NORMAL) {
+#if defined(__aarch64__)
+		iounmap(pci_adapter->dwqe_mapping);
+#else
+		io_mapping_free(pci_adapter->dwqe_mapping);
+#endif
+	}
+
+	iounmap(pci_adapter->db_base);
+	iounmap(pci_adapter->intr_reg_base);
+	iounmap(pci_adapter->cfg_reg_base);
+}
+
+/* Attach @pci_adapter to an existing chip node (matched by parent bus
+ * number, or any node for a VF with no parent bus), or allocate a new
+ * node with a fresh card id and add it to the global list.
+ * Returns 0, -EFAULT when no card id is free, or -ENOMEM.
+ */
+static int alloc_chip_node(struct hinic_pcidev *pci_adapter)
+{
+	struct card_node *chip_node;
+	unsigned char i;
+	unsigned char parent_bus_number = 0;
+	int err;
+
+	if (!pci_is_root_bus(pci_adapter->pcidev->bus))
+		parent_bus_number = pci_adapter->pcidev->bus->parent->number;
+
+	if (parent_bus_number != 0) {
+		list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+			if (chip_node->dp_bus_num == parent_bus_number) {
+				pci_adapter->chip_node = chip_node;
+				return 0;
+			}
+		}
+	} else if (pci_adapter->pcidev->device == HINIC_DEV_ID_1822_VF ||
+		   pci_adapter->pcidev->device == HINIC_DEV_ID_1822_VF_HV) {
+		list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+			if (chip_node) {
+				pci_adapter->chip_node = chip_node;
+				return 0;
+			}
+		}
+	}
+
+	/* claim the first free card id from the bitmap */
+	for (i = 0; i < MAX_CARD_ID; i++) {
+		if (!FIND_BIT(card_bit_map, i)) {
+			card_bit_map = (u64)SET_BIT(card_bit_map, i);
+			break;
+		}
+	}
+
+	if (i == MAX_CARD_ID) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"Failed to alloc card id\n");
+		return -EFAULT;
+	}
+
+	chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL);
+	if (!chip_node)
+		goto alloc_chip_err;
+
+	chip_node->dbgtool_attr_file.name = kzalloc(IFNAMSIZ, GFP_KERNEL);
+	if (!(chip_node->dbgtool_attr_file.name)) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"Failed to alloc dbgtool attr file name\n");
+		goto alloc_dbgtool_attr_file_err;
+	}
+
+	/* parent bus number */
+	chip_node->dp_bus_num = parent_bus_number;
+
+	err = snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d",
+		       HINIC_CHIP_NAME, i);
+	if (err <= 0 || err >= IFNAMSIZ) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"Failed to snprintf chip_name, function return(%d) and dest_len(%d)\n",
+			err, IFNAMSIZ);
+		goto alloc_dbgtool_attr_file_err;
+	}
+
+	err = snprintf((char *)chip_node->dbgtool_attr_file.name,
+		       IFNAMSIZ, "%s%d", HINIC_CHIP_NAME, i);
+	if (err <= 0 || err >= IFNAMSIZ) {
+		sdk_err(&pci_adapter->pcidev->dev,
+			"Failed to snprintf dbgtool_attr_file_name, function return(%d) and dest_len(%d)\n",
+			err, IFNAMSIZ);
+		goto alloc_dbgtool_attr_file_err;
+	}
+
+	sdk_info(&pci_adapter->pcidev->dev,
+		 "Add new chip %s to global list succeed\n",
+		 chip_node->chip_name);
+
+	list_add_tail(&chip_node->node, &g_hinic_chip_list);
+
+	INIT_LIST_HEAD(&chip_node->func_list);
+	pci_adapter->chip_node = chip_node;
+
+	mutex_init(&chip_node->sfp_mutex);
+
+	return 0;
+
+alloc_dbgtool_attr_file_err:
+	/* also free the attr file name: the snprintf failure paths used to
+	 * leak it; kfree(NULL) is a no-op when the name allocation itself
+	 * failed
+	 */
+	kfree(chip_node->dbgtool_attr_file.name);
+	kfree(chip_node);
+
+alloc_chip_err:
+	card_bit_map = CLEAR_BIT(card_bit_map, i);
+	return -ENOMEM;
+}
+
+/* Drop @pci_adapter's chip node when its function list is empty: remove
+ * it from the global list, release its card-id bit (parsed back out of
+ * the chip name) and free its memory.
+ */
+static void free_chip_node(struct hinic_pcidev *pci_adapter)
+{
+	struct card_node *chip_node = pci_adapter->chip_node;
+	u32 id;
+	int err;
+
+	if (!(card_bit_map & BIT(pci_adapter->card_id)))
+		return;
+
+	if (list_empty(&chip_node->func_list)) {
+		list_del(&chip_node->node);
+		sdk_info(&pci_adapter->pcidev->dev,
+			 "Delete chip %s from global list succeed\n",
+			 chip_node->chip_name);
+		err = sscanf(chip_node->chip_name, HINIC_CHIP_NAME "%u", &id);
+		if (err <= 0)
+			/* id is uninitialized on parse failure: skip the
+			 * CLEAR_BIT instead of clearing a random bit
+			 */
+			sdk_err(&pci_adapter->pcidev->dev, "Failed to get hinic id\n");
+		else
+			card_bit_map = CLEAR_BIT(card_bit_map, id);
+
+		kfree(chip_node->dbgtool_attr_file.name);
+		kfree(chip_node);
+	}
+}
+
+/* Decide whether this VF should be left unbound by the host driver.
+ * Non-VFs always load. A VF on a root bus (passed through to a VM) and a
+ * VF whose owning PF cannot be located both fall back to the module-wide
+ * disable_vf_load parameter; otherwise the per-PF setting recorded in the
+ * chip node is used.
+ */
+static bool hinic_get_vf_load_state(struct pci_dev *pdev)
+{
+	unsigned char parent_bus_number;
+	struct card_node *chip_node;
+	u8 id;
+
+	if (!pdev->is_virtfn)
+		return false;
+
+	/* vf used in vm */
+	if (pci_is_root_bus(pdev->bus))
+		return disable_vf_load;
+
+	parent_bus_number = pdev->bus->parent->number;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		if (chip_node->dp_bus_num == parent_bus_number) {
+			for (id = 0; id < HINIC_MAX_PF_NUM; id++) {
+				if (chip_node->pf_bus_num[id] ==
+				    pdev->bus->number) {
+					lld_dev_put();
+					return chip_node->disable_vf_load[id];
+				}
+			}
+		}
+	}
+	lld_dev_put();
+
+	return disable_vf_load;
+}
+
+/* Record, in the chip node, whether VFs of this PF should be skipped by
+ * the host driver (consumed later by hinic_get_vf_load_state()). VFs are
+ * ignored; BM-slave functions force their VFs to be probed.
+ */
+static void hinic_set_vf_load_state(struct hinic_pcidev *pci_adapter,
+				    bool vf_load_state)
+{
+	struct card_node *chip_node;
+	u16 func_id;
+
+	if (hinic_func_type(pci_adapter->hwdev) == TYPE_VF)
+		return;
+
+	/* The VF on the BM slave side must be probed */
+	if (is_multi_bm_slave(pci_adapter->hwdev))
+		vf_load_state = false;
+
+	func_id = hinic_global_func_id_hw(pci_adapter->hwdev);
+
+	chip_node = pci_adapter->chip_node;
+	chip_node->disable_vf_load[func_id] = vf_load_state;
+	chip_node->pf_bus_num[func_id] = pci_adapter->pcidev->bus->number;
+
+	sdk_info(&pci_adapter->pcidev->dev, "Current function support %s, %s vf load in host\n",
+		 (hinic_support_ovs(pci_adapter->hwdev, NULL) ? "ovs" : "nic"),
+		 (vf_load_state ? "disable" : "enable"));
+}
+
+/* Exported OVS hook: re-apply the module-level disable_vf_load policy to
+ * @pdev's function. Returns -EINVAL/-EFAULT on a missing pdev/adapter.
+ */
+int hinic_ovs_set_vf_load_state(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter;
+
+	if (!pdev) {
+		pr_err("pdev is null\n");
+		return -EINVAL;
+	}
+
+	pci_adapter = pci_get_drvdata(pdev);
+	if (!pci_adapter) {
+		pr_err("pci_adapter is null\n");
+		return -EFAULT;
+	}
+
+	hinic_set_vf_load_state(pci_adapter, disable_vf_load);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_ovs_set_vf_load_state);
+
+/* Placeholder for configuring the default Max Read Request Size; the
+ * hardware/default value is accepted as-is, so this always succeeds.
+ */
+static int hinic_config_deft_mrss(struct pci_dev *pdev)
+{
+	return 0;
+}
+
+/* Placeholder for configuring the PCIe Completion Timeout; the default
+ * value is accepted as-is, so this always succeeds.
+ */
+static int hinic_config_pci_cto(struct pci_dev *pdev)
+{
+	return 0;
+}
+
+/* First probe stage: allocate the per-function adapter, attach it as
+ * drvdata, then enable the PCI device, claim its regions and set up the
+ * 64-bit (falling back to 32-bit) DMA masks. For VFs that policy says
+ * must stay unbound, the function stops after setting drvdata so that
+ * hinic_remove() can still free the adapter. Unwinds everything on error.
+ */
+static int hinic_pci_init(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter = NULL;
+	int err;
+
+	err = hinic_config_deft_mrss(pdev);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to configure Max Read Request Size\n");
+		return err;
+	}
+
+	err = hinic_config_pci_cto(pdev);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to configure Completion timeout\n");
+		return err;
+	}
+
+	pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
+	if (!pci_adapter)
+		return -ENOMEM;
+
+	pci_adapter->pcidev = pdev;
+	mutex_init(&pci_adapter->pdev_mutex);
+
+	pci_set_drvdata(pdev, pci_adapter);
+
+#ifdef CONFIG_PCI_IOV
+	/* unbound VF: keep the adapter allocated but do no HW setup */
+	if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) {
+		sdk_info(&pdev->dev, "VFs are not binded to hinic\n");
+		return 0;
+	}
+#endif
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to enable PCI device\n");
+		goto pci_enable_err;
+	}
+
+	err = pci_request_regions(pdev, HINIC_DRV_NAME);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to request regions\n");
+		goto pci_regions_err;
+	}
+
+	// pci_enable_pcie_error_reporting(pdev);
+
+	pci_set_master(pdev);
+
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			sdk_err(&pdev->dev, "Failed to set DMA mask\n");
+			goto dma_mask_err;
+		}
+	}
+
+	err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		sdk_warn(&pdev->dev,
+			 "Couldn't set 64-bit coherent DMA mask\n");
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			sdk_err(&pdev->dev,
+				"Failed to set coherent DMA mask\n");
+			goto dma_consistnet_mask_err;
+		}
+	}
+
+	return 0;
+
+dma_consistnet_mask_err:
+dma_mask_err:
+	pci_clear_master(pdev);
+	// pci_disable_pcie_error_reporting(pdev);
+	pci_release_regions(pdev);
+
+pci_regions_err:
+	pci_disable_device(pdev);
+
+pci_enable_err:
+	pci_set_drvdata(pdev, NULL);
+	kfree(pci_adapter);
+
+	return err;
+}
+
+/* Undo hinic_pci_init(): stop bus mastering, release regions, disable
+ * the device and free the adapter attached as drvdata.
+ */
+static void hinic_pci_deinit(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+	// pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	kfree(pci_adapter);
+}
+
+/* When a PPF is going away, unregister it from every function on the
+ * same chip (no-op for non-PPF functions). Serialized by the chip-node
+ * lock.
+ */
+static void hinic_notify_ppf_unreg(struct hinic_pcidev *pci_adapter)
+{
+	struct card_node *chip_node = pci_adapter->chip_node;
+	struct hinic_pcidev *dev;
+
+	if (hinic_func_type(pci_adapter->hwdev) != TYPE_PPF)
+		return;
+
+	lld_lock_chip_node();
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		hinic_ppf_hwdev_unreg(dev->hwdev);
+	}
+	lld_unlock_chip_node();
+}
+
+/* When a PPF has come up, register its hwdev with every function on the
+ * same chip (no-op for non-PPF functions). Serialized by the chip-node
+ * lock.
+ */
+static void hinic_notify_ppf_reg(struct hinic_pcidev *pci_adapter)
+{
+	struct card_node *chip_node = pci_adapter->chip_node;
+	struct hinic_pcidev *dev;
+
+	if (hinic_func_type(pci_adapter->hwdev) != TYPE_PPF)
+		return;
+
+	lld_lock_chip_node();
+	list_for_each_entry(dev, &chip_node->func_list, node) {
+		hinic_ppf_hwdev_reg(dev->hwdev, pci_adapter->hwdev);
+	}
+	lld_unlock_chip_node();
+}
+
+#ifdef CONFIG_X86
+/**
+ * cfg_order_reg - when cpu model is haswell or broadwell, should configure dma
+ * order register to zero
+ * @pci_adapter: pci adapter
+ *
+ * PF/PPF only (VFs return early). The model-number table covers the
+ * Haswell/Broadwell client and server parts; on a match the PCIe ordering
+ * workaround is applied via hinic_set_pcie_order_cfg().
+ */
+/*lint -save -e40 */
+void cfg_order_reg(struct hinic_pcidev *pci_adapter)
+{
+	u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56};
+	struct cpuinfo_x86 *cpuinfo;
+	u32 i;
+
+	if (HINIC_FUNC_IS_VF(pci_adapter->hwdev))
+		return;
+
+	/* only CPU 0's model is consulted */
+	cpuinfo = &cpu_data(0);
+	for (i = 0; i < sizeof(cpu_model); i++) {
+		if (cpu_model[i] == cpuinfo->x86_model)
+			hinic_set_pcie_order_cfg(pci_adapter->hwdev);
+	}
+}
+
+/*lint -restore*/
+#endif
+
+/* Second probe stage: bring up the hwdev and everything above it.
+ * hinic_init_hwdev() returning >0 means a partial init — either retry
+ * later via the slave-nic delayed work (ALL_INITED bit set) or record a
+ * partial-init state; both paths return -EFAULT to the caller. On full
+ * success the sequence is: PPF registration, VF-load policy, event
+ * callback, firmware time sync, dbgtool, version check, optional default
+ * SR-IOV enable, NIC uld first, then the remaining ulds.
+ */
+static int hinic_func_init(struct pci_dev *pdev,
+			   struct hinic_pcidev *pci_adapter)
+{
+	struct hinic_init_para init_para;
+	bool vf_load_state;
+	int err;
+
+	init_para.adapter_hdl = pci_adapter;
+	init_para.pcidev_hdl = pdev;
+	init_para.dev_hdl = &pdev->dev;
+	init_para.cfg_reg_base = pci_adapter->cfg_reg_base;
+	init_para.intr_reg_base = pci_adapter->intr_reg_base;
+	init_para.db_base = pci_adapter->db_base;
+	init_para.db_base_phy = pci_adapter->db_base_phy;
+	init_para.dwqe_mapping = pci_adapter->dwqe_mapping;
+	init_para.hwdev = &pci_adapter->hwdev;
+	init_para.chip_node = pci_adapter->chip_node;
+	init_para.ppf_hwdev = hinic_get_ppf_hwdev_by_pdev(pdev);
+	err = hinic_init_hwdev(&init_para);
+	if (err < 0) {
+		pci_adapter->hwdev = NULL;
+		sdk_err(&pdev->dev, "Failed to initialize hardware device\n");
+		return -EFAULT;
+	} else if (err > 0) {
+		/* positive return: partial init; err is a bitmask of
+		 * completed stages
+		 */
+		if (err == (1 << HINIC_HWDEV_ALL_INITED) &&
+		    pci_adapter->init_state < HINIC_INIT_STATE_HW_IF_INITED) {
+			pci_adapter->init_state = HINIC_INIT_STATE_HW_IF_INITED;
+			sdk_info(&pdev->dev,
+				 "Initialize hardware device later\n");
+			queue_delayed_work(pci_adapter->slave_nic_init_workq,
+					   &pci_adapter->slave_nic_init_dwork,
+					   HINIC_SLAVE_NIC_DELAY_TIME);
+			set_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
+		} else if (err != (1 << HINIC_HWDEV_ALL_INITED)) {
+			sdk_err(&pdev->dev,
+				"Initialize hardware device partitial failed\n");
+			hinic_detect_version_compatible(pci_adapter);
+			hinic_notify_ppf_reg(pci_adapter);
+			pci_adapter->init_state =
+				HINIC_INIT_STATE_HW_PART_INITED;
+		}
+		return -EFAULT;
+	}
+
+	hinic_notify_ppf_reg(pci_adapter);
+	pci_adapter->init_state = HINIC_INIT_STATE_HWDEV_INITED;
+
+	/* OVS-capable PFs never load their VFs in the host */
+	vf_load_state = hinic_support_ovs(pci_adapter->hwdev, NULL) ?
+			true : disable_vf_load;
+
+	hinic_set_vf_load_state(pci_adapter, vf_load_state);
+	hinic_qps_num_set(pci_adapter->hwdev, 0);
+
+	pci_adapter->lld_dev.pdev = pdev;
+	pci_adapter->lld_dev.hwdev = pci_adapter->hwdev;
+	pci_adapter->sriov_info.pdev = pdev;
+	pci_adapter->sriov_info.hwdev = pci_adapter->hwdev;
+
+	hinic_event_register(pci_adapter->hwdev, pci_adapter,
+			     hinic_event_process);
+
+	if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev))
+		hinic_sync_time_to_fmw(pci_adapter);
+	hinic_init_syncfw_timer(pci_adapter);
+
+	/* dbgtool init */
+	lld_lock_chip_node();
+	err = hinic_dbgtool_knl_init(pci_adapter->hwdev, pci_adapter->chip_node);
+	if (err) {
+		lld_unlock_chip_node();
+		sdk_err(&pdev->dev, "Failed to initialize dbgtool\n");
+		hinic_destroy_syncfw_timer(pci_adapter);
+		hinic_event_unregister(pci_adapter->hwdev);
+		return err;
+	}
+	lld_unlock_chip_node();
+
+	pci_adapter->init_state = HINIC_INIT_STATE_DBGTOOL_INITED;
+
+	err = hinic_detect_version_compatible(pci_adapter);
+	if (err)
+		return err;
+
+	if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev) &&
+	    FUNC_ENABLE_SRIOV_IN_DEFAULT(pci_adapter->hwdev)) {
+		hinic_pci_sriov_enable(pdev,
+				       hinic_func_max_vf(pci_adapter->hwdev));
+	}
+
+	/* NIC is base driver, probe firstly */
+	err = __set_nic_func_state(pci_adapter);
+	if (err)
+		return err;
+
+	attach_ulds(pci_adapter);
+
+#ifdef CONFIG_X86
+	cfg_order_reg(pci_adapter);
+#endif
+
+	sdk_info(&pdev->dev, "Pcie device probed\n");
+	pci_adapter->init_state = HINIC_INIT_STATE_ALL_INITED;
+
+	return 0;
+}
+
+/* Reverse of hinic_func_init(), gated on how far probe progressed:
+ * silence and flush mgmt events, detach ulds, tear down dbgtool/timer/
+ * event callback, unregister the PPF and finally free the hwdev (after
+ * unlinking the node so no list walker can reach freed memory).
+ */
+static void hinic_func_deinit(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+	/* When function deinit, disable mgmt initiative report events firstly,
+	 * then flush mgmt work-queue.
+	 */
+	hinic_disable_mgmt_msg_report(pci_adapter->hwdev);
+	if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED)
+		hinic_flush_mgmt_workq(pci_adapter->hwdev);
+
+	hinic_set_func_deinit_flag(pci_adapter->hwdev);
+
+	if (pci_adapter->init_state >= HINIC_INIT_STATE_NIC_INITED) {
+		detach_ulds(pci_adapter);
+		detach_uld(pci_adapter, SERVICE_T_NIC);
+	}
+
+	if (pci_adapter->init_state >= HINIC_INIT_STATE_DBGTOOL_INITED) {
+		lld_lock_chip_node();
+		hinic_dbgtool_knl_deinit(pci_adapter->hwdev, pci_adapter->chip_node);
+		lld_unlock_chip_node();
+		hinic_destroy_syncfw_timer(pci_adapter);
+		hinic_event_unregister(pci_adapter->hwdev);
+	}
+
+	hinic_notify_ppf_unreg(pci_adapter);
+	if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED) {
+		/* Remove the current node from node-list first,
+		 * then it's safe to free hwdev
+		 */
+		lld_lock_chip_node();
+		list_del(&pci_adapter->node);
+		lld_unlock_chip_node();
+
+		hinic_free_hwdev(pci_adapter->hwdev);
+	}
+}
+
+/* Poll (about 10ms per attempt, up to HINIC_WAIT_TOOL_CNT_TIMEOUT
+ * attempts) until no management-tool user holds a reference; gives up
+ * silently on timeout.
+ */
+static void wait_tool_unused(void)
+{
+	u32 attempts;
+
+	for (attempts = 0; attempts < HINIC_WAIT_TOOL_CNT_TIMEOUT; attempts++) {
+		if (!atomic_read(&tool_used_cnt))
+			return;
+
+		usleep_range(9900, 10000);
+	}
+}
+
+/* Flag the function as removing, then poll (about 10ms per attempt, up
+ * to HINIC_WAIT_SRIOV_CFG_TIMEOUT attempts) until no SR-IOV enable or
+ * disable operation is still in flight; gives up silently on timeout.
+ */
+static inline void wait_sriov_cfg_complete(struct hinic_pcidev *pci_adapter)
+{
+	struct hinic_sriov_info *sriov_info = &pci_adapter->sriov_info;
+	u32 attempts;
+
+	set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
+	usleep_range(9900, 10000);
+
+	for (attempts = 0; attempts < HINIC_WAIT_SRIOV_CFG_TIMEOUT;
+	     attempts++) {
+		if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
+		    !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
+			return;
+
+		usleep_range(9900, 10000);
+	}
+}
+
+/* PCI remove callback. Unbound VFs only free their adapter. Otherwise:
+ * stop the slave-nic delayed work, then tear down according to how far
+ * probe got — SR-IOV disable (fully/NIC-inited only), cancel the nic
+ * work, wait for tool users, function deinit, chip-node cleanup, BAR
+ * unmap and PCI deinit.
+ */
+static void hinic_remove(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+	if (!pci_adapter)
+		return;
+
+	sdk_info(&pdev->dev, "Pcie device remove begin\n");
+#ifdef CONFIG_PCI_IOV
+	if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) {
+		pci_set_drvdata(pdev, NULL);
+		kfree(pci_adapter);
+		return;
+	}
+#endif
+	cancel_delayed_work_sync(&pci_adapter->slave_nic_init_dwork);
+	flush_workqueue(pci_adapter->slave_nic_init_workq);
+	destroy_workqueue(pci_adapter->slave_nic_init_workq);
+
+	if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED)
+		hinic_detect_hw_present(pci_adapter->hwdev);
+
+	switch (pci_adapter->init_state) {
+	case HINIC_INIT_STATE_ALL_INITED:
+	case HINIC_INIT_STATE_NIC_INITED:
+		/* Don't support hotplug when SR-IOV is enabled now.
+		 * So disable SR-IOV capability as normal.
+		 */
+		if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) {
+			wait_sriov_cfg_complete(pci_adapter);
+			hinic_pci_sriov_disable(pdev);
+		}
+		fallthrough;
+	case HINIC_INIT_STATE_DBGTOOL_INITED:
+	case HINIC_INIT_STATE_HWDEV_INITED:
+	case HINIC_INIT_STATE_HW_PART_INITED:
+	case HINIC_INIT_STATE_HW_IF_INITED:
+	case HINIC_INIT_STATE_PCI_INITED:
+		set_bit(HINIC_FUNC_IN_REMOVE, &pci_adapter->flag);
+		lld_lock_chip_node();
+		cancel_work_sync(&pci_adapter->slave_nic_work);
+		lld_unlock_chip_node();
+
+		wait_tool_unused();
+
+		if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED)
+			hinic_func_deinit(pdev);
+
+		lld_lock_chip_node();
+		/* for early states the node was not removed by
+		 * hinic_func_deinit(), so unlink it here
+		 */
+		if (pci_adapter->init_state < HINIC_INIT_STATE_HW_IF_INITED)
+			list_del(&pci_adapter->node);
+		hinic_tool_k_uninit();
+		free_chip_node(pci_adapter);
+		lld_unlock_chip_node();
+		unmapping_bar(pci_adapter);
+		hinic_pci_deinit(pdev);
+
+		break;
+
+	default:
+		break;
+	}
+
+	sdk_info(&pdev->dev, "Pcie device removed\n");
+}
+
+/* Delayed worker for slave-host probing: keep re-queueing until the
+ * master host's mailbox is enabled; a PPF then initializes immediately,
+ * while other functions wait until a fully-initialized PPF exists on the
+ * chip (the PPF must come up first). Probe errors set the PRB_ERR flag.
+ */
+static void slave_host_init_delay_work(struct work_struct *work)
+{
+	struct delayed_work *delay = to_delayed_work(work);
+	struct hinic_pcidev *pci_adapter = container_of(delay,
+			struct hinic_pcidev, slave_nic_init_dwork);
+	struct pci_dev *pdev = pci_adapter->pcidev;
+	struct card_node *chip_node = pci_adapter->chip_node;
+	int found = 0;
+	struct hinic_pcidev *ppf_pcidev = NULL;
+	int err;
+
+	if (!hinic_get_master_host_mbox_enable(pci_adapter->hwdev)) {
+		queue_delayed_work(pci_adapter->slave_nic_init_workq,
+				   &pci_adapter->slave_nic_init_dwork,
+				   HINIC_SLAVE_NIC_DELAY_TIME);
+		return;
+	}
+	if (hinic_func_type(pci_adapter->hwdev) == TYPE_PPF) {
+		err = hinic_func_init(pdev, pci_adapter);
+		clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
+		if (err)
+			set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
+		return;
+	}
+
+	/* Make sure the PPF must be the first one */
+	lld_dev_hold();
+	list_for_each_entry(ppf_pcidev, &chip_node->func_list, node) {
+		if (test_bit(HINIC_FUNC_IN_REMOVE, &ppf_pcidev->flag) ||
+		    ppf_pcidev->init_state < HINIC_INIT_STATE_HW_IF_INITED)
+			continue;
+
+		if (hinic_func_type(ppf_pcidev->hwdev) == TYPE_PPF) {
+			found = 1;
+			break;
+		}
+	}
+	lld_dev_put();
+	if (found && ppf_pcidev->init_state == HINIC_INIT_STATE_ALL_INITED) {
+		err = hinic_func_init(pdev, pci_adapter);
+		clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
+		if (err)
+			set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
+	} else {
+		/* PPF not ready yet — try again later */
+		queue_delayed_work(pci_adapter->slave_nic_init_workq,
+				   &pci_adapter->slave_nic_init_dwork,
+				   HINIC_SLAVE_NIC_DELAY_TIME);
+	}
+}
+
+/* hinic_probe - PCI probe callback: bring up one hinic PCI function.
+ * @pdev: PCI device to initialize.
+ * @id: matched entry of hinic_pci_table; cached in pci_adapter->id.
+ *
+ * Return: 0 on success (including deferred/failed late init, which is
+ * reported via adapter flags instead), negative errno on early failure.
+ */
+static int hinic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct hinic_pcidev *pci_adapter;
+	int err;
+
+	sdk_info(&pdev->dev, "Pcie device probe begin\n");
+
+	err = hinic_pci_init(pdev);
+	if (err)
+		return err;
+
+#ifdef CONFIG_PCI_IOV
+	/* VF load deferred to the host: nothing more to set up here */
+	if (pdev->is_virtfn && hinic_get_vf_load_state(pdev))
+		return 0;
+#endif
+
+	pci_adapter = pci_get_drvdata(pdev);
+	clear_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
+	clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag);
+	err = mapping_bar(pdev, pci_adapter);
+	if (err) {
+		sdk_err(&pdev->dev, "Failed to map bar\n");
+		goto map_bar_failed;
+	}
+
+	pci_adapter->id = *id;
+	INIT_WORK(&pci_adapter->slave_nic_work, slave_host_mgmt_work);
+	pci_adapter->slave_nic_init_workq =
+		create_singlethread_workqueue(HINIC_SLAVE_NIC_DELAY);
+	if (!pci_adapter->slave_nic_init_workq) {
+		sdk_err(&pdev->dev,
+			"Failed to create work queue: %s\n",
+			HINIC_SLAVE_NIC_DELAY);
+		/* bug fix: err was still 0 here, so probe used to return
+		 * success after tearing everything down
+		 */
+		err = -ENOMEM;
+		goto create_nic_delay_work_fail;
+	}
+	INIT_DELAYED_WORK(&pci_adapter->slave_nic_init_dwork,
+			  slave_host_init_delay_work);
+
+	/* if chip information of pcie function exist,
+	 * add the function into chip
+	 */
+	lld_lock_chip_node();
+	err = alloc_chip_node(pci_adapter);
+	if (err) {
+		sdk_err(&pdev->dev,
+			"Failed to add new chip node to global list\n");
+		goto alloc_chip_node_fail;
+	}
+
+	sscanf(pci_adapter->chip_node->chip_name, HINIC_CHIP_NAME "%d",
+	       &pci_adapter->card_id);
+
+	err = hinic_tool_k_init();
+	if (err) {
+		sdk_warn(&pdev->dev, "Failed to init nictool\n");
+		goto init_nictool_err;
+	}
+
+	list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list);
+
+	lld_unlock_chip_node();
+
+	pci_adapter->init_state = HINIC_INIT_STATE_PCI_INITED;
+
+	err = hinic_func_init(pdev, pci_adapter);
+	if (err)
+		goto func_init_err;
+
+	return 0;
+
+func_init_err:
+	/* keep the function registered so late init can be retried;
+	 * only record the probe error/delay state in the flags
+	 */
+	if (!test_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag))
+		set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag);
+	return 0;
+
+init_nictool_err:
+	free_chip_node(pci_adapter);
+
+alloc_chip_node_fail:
+	lld_unlock_chip_node();
+	/* bug fix: the slave-NIC workqueue leaked on these two paths */
+	destroy_workqueue(pci_adapter->slave_nic_init_workq);
+
+create_nic_delay_work_fail:
+	unmapping_bar(pci_adapter);
+
+map_bar_failed:
+	hinic_pci_deinit(pdev);
+
+	sdk_err(&pdev->dev, "Pcie device probe failed\n");
+	return err;
+}
+
+/*lint -save -e133 -e10*/
+/* PCI IDs this driver binds to; driver_data carries the board type
+ * (HINIC_BOARD_*), 0 for VF entries
+ */
+static const struct pci_device_id hinic_pci_table[] = {
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PF), HINIC_BOARD_25GE},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_VF), 0},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_VF_HV), 0},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_SMTIO), HINIC_BOARD_PG_SM_25GE},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PANGEA_100GE),
+	 HINIC_BOARD_PG_100GE},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PANGEA_TP_10GE),
+	 HINIC_BOARD_PG_TP_10GE},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_40GE), HINIC_BOARD_40GE},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_100GE), HINIC_BOARD_100GE},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_25GE), HINIC_BOARD_25GE},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_MULTI_HOST), HINIC_BOARD_25GE},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_100GE), HINIC_BOARD_100GE},
+	{PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_DUAL_25GE), HINIC_BOARD_25GE},
+	/* terminating all-zero entry required by the PCI core */
+	{0, 0}
+};
+
+/*lint -restore*/
+MODULE_DEVICE_TABLE(pci, hinic_pci_table);
+
+/**
+ * hinic_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ *
+ * Since we only need error detecting not error handling, so we
+ * always return PCI_ERS_RESULT_CAN_RECOVER to tell the AER
+ * driver that we don't need reset(error handling).
+ */
+static pci_ers_result_t hinic_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct hinic_pcidev *pci_adapter;
+
+	/* link is gone for good: tell the AER core to disconnect us */
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	sdk_err(&pdev->dev,
+		"Uncorrectable error detected, log and cleanup error status: 0x%08x\n",
+		state);
+
+	pci_adapter = pci_get_drvdata(pdev);
+	/* drvdata may be NULL if probe did not complete */
+	if (pci_adapter)
+		hinic_record_pcie_error(pci_adapter->hwdev);
+
+	return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+/* hinic_shutdown - PCI shutdown callback (reboot/kexec path).
+ * Quiesces the hardware so it stops DMA before control is handed over.
+ */
+static void hinic_shutdown(struct pci_dev *pdev)
+{
+	struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+	sdk_info(&pdev->dev, "Shutdown device\n");
+
+	if (pci_adapter)
+		hinic_shutdown_hwdev(pci_adapter->hwdev);
+
+	pci_disable_device(pdev);
+
+	/* NOTE(review): runs after pci_disable_device(); confirm
+	 * hinic_set_api_stop() does not need the device enabled
+	 */
+	if (pci_adapter)
+		hinic_set_api_stop(pci_adapter->hwdev);
+}
+
+/* Cause we only need error detecting not error handling, so only error_detected
+ * callback is enough.
+ */
+static struct pci_error_handlers hinic_err_handler = {
+	.error_detected = hinic_io_error_detected,
+};
+
+/* PCI driver entry points of the hinic low-level driver */
+static struct pci_driver hinic_driver = {
+	.name		 = HINIC_DRV_NAME,
+	.id_table	 = hinic_pci_table,
+	.probe		 = hinic_probe,
+	.remove		 = hinic_remove,
+	.shutdown	 = hinic_shutdown,
+	.sriov_configure = hinic_pci_sriov_configure,
+	.err_handler	 = &hinic_err_handler
+};
+
+/* hinic_lld_init - module entry: reset global state, register the NIC
+ * upper-layer driver, then register the PCI driver.
+ */
+static int __init hinic_lld_init(void)
+{
+	pr_info("%s - version %s\n", HINIC_DRV_DESC, HINIC_DRV_VERSION);
+	memset(g_uld_info, 0, sizeof(g_uld_info));
+	atomic_set(&tool_used_cnt, 0);
+
+	hinic_lld_lock_init();
+
+	/* register nic driver information first, and add net device in
+	 * nic_probe called by hinic_probe.
+	 */
+	/* NOTE(review): return value ignored; on freshly zeroed g_uld_info
+	 * the NIC slot is free, but confirm hinic_register_uld() has no
+	 * other failure mode worth propagating
+	 */
+	hinic_register_uld(SERVICE_T_NIC, &nic_uld_info);
+
+	return pci_register_driver(&hinic_driver);
+}
+
+/* hinic_lld_exit - module exit: tear down in reverse order of init */
+static void __exit hinic_lld_exit(void)
+{
+	pci_unregister_driver(&hinic_driver);
+
+	hinic_unregister_uld(SERVICE_T_NIC);
+}
+
+module_init(hinic_lld_init);
+module_exit(hinic_lld_exit);
+
+/* hinic_register_micro_log - attach micro-log support to every PPF.
+ * @micro_log_info: callback set; both init and deinit are mandatory.
+ *
+ * Walks all chips and functions under lld_dev_hold() and calls init()
+ * on each PPF that has at least reached HW_IF_INITED. A failing init()
+ * is logged and skipped, not propagated.
+ *
+ * Return: 0 on success, -EINVAL on invalid callback set.
+ */
+int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info)
+{
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	if (!micro_log_info || !micro_log_info->init ||
+	    !micro_log_info->deinit) {
+		pr_err("Invalid information of micro log info to register\n");
+		return -EINVAL;
+	}
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			/* skip functions being removed or not yet up */
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) ||
+			    dev->init_state < HINIC_INIT_STATE_HW_IF_INITED)
+				continue;
+
+			if (hinic_func_type(dev->hwdev) == TYPE_PPF) {
+				if (micro_log_info->init(dev->hwdev)) {
+					sdk_err(&dev->pcidev->dev,
+						"micro log init failed\n");
+					continue;
+				}
+			}
+		}
+	}
+	lld_dev_put();
+	pr_info("Register micro log succeed\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_register_micro_log);
+
+/* hinic_unregister_micro_log - detach micro-log support from every PPF.
+ * @micro_log_info: callback set previously passed to the register call.
+ *
+ * Mirrors hinic_register_micro_log(): calls deinit() on each live PPF.
+ */
+void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info)
+{
+	struct card_node *chip_node;
+	struct hinic_pcidev *dev;
+
+	if (!micro_log_info)
+		return;
+
+	lld_dev_hold();
+	list_for_each_entry(chip_node, &g_hinic_chip_list, node) {
+		list_for_each_entry(dev, &chip_node->func_list, node) {
+			/* skip functions being removed or not yet up */
+			if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) ||
+			    dev->init_state < HINIC_INIT_STATE_HW_IF_INITED)
+				continue;
+
+			if (hinic_func_type(dev->hwdev) == TYPE_PPF)
+				micro_log_info->deinit(dev->hwdev);
+		}
+	}
+	lld_dev_put();
+	pr_info("Unregister micro log succeed\n");
+}
+EXPORT_SYMBOL(hinic_unregister_micro_log);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.h b/drivers/net/ethernet/huawei/hinic/hinic_lld.h
new file mode 100644
index 000000000000..414dfd146f9a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_LLD_H_
+#define HINIC_LLD_H_
+
+#define HINIC_SLAVE_NIC_DELAY "hinic_slave_nic_delay"
+#define HINIC_SLAVE_NIC_DELAY_TIME (5 * HZ)
+
+/* handle handed to upper-layer drivers (ULDs) identifying one function */
+struct hinic_lld_dev {
+	struct pci_dev *pdev;	/* owning PCI function */
+	void *hwdev;		/* opaque hw device context */
+};
+
+/* ordered probe-progress states; hinic_remove() unwinds based on how
+ * far probe advanced, so the relative order of these values matters
+ */
+enum hinic_init_state {
+	HINIC_INIT_STATE_NONE,
+	HINIC_INIT_STATE_PCI_INITED,
+	HINIC_INIT_STATE_HW_IF_INITED,
+	HINIC_INIT_STATE_HW_PART_INITED,
+	HINIC_INIT_STATE_HWDEV_INITED,
+	HINIC_INIT_STATE_DBGTOOL_INITED,
+	HINIC_INIT_STATE_NIC_INITED,
+	HINIC_INIT_STATE_ALL_INITED,
+};
+
+/* callback table an upper-layer driver (NIC/FC/...) registers with the
+ * low-level driver via hinic_register_uld()
+ */
+struct hinic_uld_info {
+	/* uld_dev: should not return null even the function capability
+	 * is not support the up layer driver
+	 * uld_dev_name: NIC driver should copy net device name.
+	 * FC driver could copy fc device name.
+	 * other up layer driver don`t need copy anything
+	 */
+	int (*probe)(struct hinic_lld_dev *lld_dev,
+		     void **uld_dev, char *uld_dev_name);
+	void (*remove)(struct hinic_lld_dev *lld_dev, void *uld_dev);
+	int (*suspend)(struct hinic_lld_dev *lld_dev,
+		       void *uld_dev, pm_message_t state);
+	int (*resume)(struct hinic_lld_dev *lld_dev, void *uld_dev);
+	/* async notifications (link change etc.) delivered to the ULD */
+	void (*event)(struct hinic_lld_dev *lld_dev, void *uld_dev,
+		      struct hinic_event_info *event);
+	/* nictool passthrough channel */
+	int (*ioctl)(void *uld_dev, u32 cmd, void *buf_in,
+		     u32 in_size, void *buf_out, u32 *out_size);
+};
+
+/* Used for the ULD HiNIC PCIe driver registration interface,
+ * the original interface is service_register_interface
+ */
+int hinic_register_uld(enum hinic_service_type uld_type,
+ struct hinic_uld_info *uld_info);
+
+/* Used for the ULD HiNIC PCIe driver unregistration interface,
+ * the original interface is service_unregister_interface
+ */
+void hinic_unregister_uld(enum hinic_service_type uld_type);
+
+void *hinic_get_ppf_uld_by_pdev(struct pci_dev *pdev,
+ enum hinic_service_type type);
+
+/* used for TOE/IWARP */
+struct net_device *hinic_get_netdev_by_lld(struct hinic_lld_dev *lld_dev);
+/* used for TOE/IWARP */
+void *hinic_get_hwdev_by_netdev(struct net_device *netdev);
+
+struct net_device *hinic_get_netdev_by_pcidev(struct pci_dev *pdev);
+void *hinic_get_hwdev_by_ifname(char *ifname);
+int hinic_get_chip_name_by_hwdev(void *hwdev, char *ifname);
+void *hinic_get_uld_dev_by_ifname(char *ifname, enum hinic_service_type type);
+void *hinic_get_uld_by_chip_name(char *ifname, enum hinic_service_type type);
+
+int hinic_get_pf_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]);
+int hinic_set_chip_cos_up_map(struct pci_dev *pdev, u8 *cos_up);
+int hinic_get_chip_cos_up_map(struct pci_dev *pdev, bool *is_setted,
+ u8 *cos_up);
+void hinic_get_all_chip_id(void *card_id);
+void hinic_get_card_info(void *hwdev, void *bufin);
+int hinic_get_device_id(void *hwdev, u16 *dev_id);
+void hinic_get_fc_devname(char *devname);
+int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid);
+
+void hinic_tool_cnt_inc(void);
+void hinic_tool_cnt_dec(void);
+
+struct hinic_sriov_info;
+struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev);
+
+/* for dpdk */
+void *hinic_get_pci_dev(u16 bdf);
+void hinic_dpdk_pcie_remove(void *pdev);
+int hinic_dpdk_pcie_probe(void *pdev);
+
+int hinic_attach_nic(struct hinic_lld_dev *lld_dev);
+void hinic_detach_nic(struct hinic_lld_dev *lld_dev);
+
+int hinic_attach_roce(struct hinic_lld_dev *lld_dev);
+void hinic_detach_roce(struct hinic_lld_dev *lld_dev);
+
+int hinic_disable_nic_rss(struct hinic_lld_dev *lld_dev);
+int hinic_enable_nic_rss(struct hinic_lld_dev *lld_dev);
+
+int hinic_ovs_set_vf_nic_state(struct hinic_lld_dev *lld_dev,
+ u16 vf_func_id, bool en);
+
+int hinic_ovs_set_vf_load_state(struct pci_dev *pdev);
+
+int hinic_get_self_test_result(char *ifname, u32 *result);
+enum hinic_init_state hinic_get_init_state_by_ifname(char *ifname);
+enum hinic_init_state hinic_get_init_state(struct pci_dev *pdev);
+
+extern struct hinic_uld_info g_uld_info[SERVICE_T_MAX];
+
+struct pci_device_id *hinic_get_pci_device_id(struct pci_dev *pdev);
+bool hinic_is_in_host(void);
+
+bool hinic_is_valid_bar_addr(u64 offset);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 499c657d37a9..864a0d02829d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -1,1506 +1,3120 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/device.h>
-#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
-#include <linux/slab.h>
#include <linux/if_vlan.h>
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
-#include <net/ip.h>
-#include <net/devlink.h>
-#include <linux/bitops.h>
-#include <linux/bitmap.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-
-#include "hinic_debugfs.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_dev.h"
-#include "hinic_devlink.h"
-#include "hinic_port.h"
+#include <linux/ethtool.h>
+#include <linux/dcbnl.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/debugfs.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw.h"
+#include "hinic_dbg.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic_dev.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
-#include "hinic_dev.h"
+#include "hinic_qp.h"
+#include "hinic_dcb.h"
+#include "hinic_lld.h"
#include "hinic_sriov.h"
+#include "hinic_pci_id_tbl.h"
-MODULE_AUTHOR("Huawei Technologies CO., Ltd");
-MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
-MODULE_LICENSE("GPL");
-
-static unsigned int tx_weight = 64;
-module_param(tx_weight, uint, 0644);
-MODULE_PARM_DESC(tx_weight, "Number Tx packets for NAPI budget (default=64)");
+static u16 num_qps;
+module_param(num_qps, ushort, 0444);
+MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default unset)");
-static unsigned int rx_weight = 64;
-module_param(rx_weight, uint, 0644);
-MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
+static u16 ovs_num_qps = 16;
+module_param(ovs_num_qps, ushort, 0444);
+MODULE_PARM_DESC(ovs_num_qps, "Number of Queue Pairs in ovs mode (default=16)");
-#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822
-#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200
-#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205
-#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210
-#define HINIC_DEV_ID_VF 0x375e
+#define DEFAULT_POLL_WEIGHT 64
+static unsigned int poll_weight = DEFAULT_POLL_WEIGHT;
+module_param(poll_weight, uint, 0444);
+MODULE_PARM_DESC(poll_weight, "Number packets for NAPI budget (default=64)");
-#define HINIC_WQ_NAME "hinic_dev"
+#define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 32
+#define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
-#define MSG_ENABLE_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+/* suit for sdi3.0 vm mode, change this define for test best performance */
+#define SDI_VM_PENDING_LIMT 2
+#define SDI_VM_COALESCE_TIMER_CFG 16
+#define SDI_VM_RX_PKT_RATE_HIGH 1000000
+#define SDI_VM_RX_PKT_RATE_LOW 30000
+#define SDI_VM_RX_USECS_HIGH 56
+#define SDI_VM_RX_PENDING_LIMT_HIGH 20
+#define SDI_VM_RX_USECS_LOW 16
+#define SDI_VM_RX_PENDING_LIMT_LOW 2
+
+/* if qp_coalesc_use_drv_params_switch !=0, use user setting params */
+static unsigned char qp_coalesc_use_drv_params_switch;
+module_param(qp_coalesc_use_drv_params_switch, byte, 0444);
+MODULE_PARM_DESC(qp_coalesc_use_drv_params_switch, "QP MSI-X Interrupt coalescing parameter switch (default=0, not use drv parameter)");
+
+static unsigned char qp_pending_limit = HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
+module_param(qp_pending_limit, byte, 0444);
+MODULE_PARM_DESC(qp_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=2)");
+
+static unsigned char qp_coalesc_timer_cfg =
+ HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+module_param(qp_coalesc_timer_cfg, byte, 0444);
+MODULE_PARM_DESC(qp_coalesc_timer_cfg, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=32)");
+
+/* For arm64 server, the best known configuration of lro max wqe number
+ * is 4 (8K), for x86_64 server, it is 8 (16K). You can also
+ * configure these values by hinicadm.
+ */
+static unsigned char set_max_wqe_num;
+module_param(set_max_wqe_num, byte, 0444);
+MODULE_PARM_DESC(set_max_wqe_num, "Set lro max wqe number, valid range is 1 - 32, default is 4(arm) / 8(x86)");
-#define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8
+#define DEFAULT_RX_BUFF_LEN 2
+u16 rx_buff = DEFAULT_RX_BUFF_LEN;
+module_param(rx_buff, ushort, 0444);
+MODULE_PARM_DESC(rx_buff, "Set rx_buff size, buffer len must be 2^n. 2 - 16, default is 2KB");
-#define HINIC_LRO_RX_TIMER_DEFAULT 16
+static u32 set_lro_timer;
+module_param(set_lro_timer, uint, 0444);
+MODULE_PARM_DESC(set_lro_timer, "Set lro timer in micro second, valid range is 1 - 1024, default is 16");
-#define work_to_rx_mode_work(work) \
- container_of(work, struct hinic_rx_mode_work, work)
+static unsigned char set_link_status_follow = HINIC_LINK_FOLLOW_STATUS_MAX;
+module_param(set_link_status_follow, byte, 0444);
+MODULE_PARM_DESC(set_link_status_follow, "Set link status follow port status. 0 - default, 1 - follow, 2 - separate, other - unset. (default unset)");
-#define rx_mode_work_to_nic_dev(rx_mode_work) \
- container_of(rx_mode_work, struct hinic_dev, rx_mode_work)
+static unsigned int lro_replenish_thld = 256;
+module_param(lro_replenish_thld, uint, 0444);
+MODULE_PARM_DESC(lro_replenish_thld, "Number wqe for lro replenish buffer (default=256)");
-#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
+static bool l2nic_interrupt_switch = true;
+module_param(l2nic_interrupt_switch, bool, 0644);
+MODULE_PARM_DESC(l2nic_interrupt_switch, "Control whether execute l2nic io interrupt switch or not, default is true");
-#define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2
-#define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 32
-#define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
+static unsigned char lro_en_status = HINIC_LRO_STATUS_UNSET;
+module_param(lro_en_status, byte, 0444);
+MODULE_PARM_DESC(lro_en_status, "lro enable status. 0 - disable, 1 - enable, other - unset. (default unset)");
-static int change_mac_addr(struct net_device *netdev, const u8 *addr);
+static unsigned char qp_pending_limit_low = HINIC_RX_PENDING_LIMIT_LOW;
+module_param(qp_pending_limit_low, byte, 0444);
+MODULE_PARM_DESC(qp_pending_limit_low, "MSI-X adaptive low coalesce pending limit, range is 0 - 255");
-static int set_features(struct hinic_dev *nic_dev,
- netdev_features_t pre_features,
- netdev_features_t features, bool force_change);
+static unsigned char qp_coalesc_timer_low = HINIC_RX_COAL_TIME_LOW;
+module_param(qp_coalesc_timer_low, byte, 0444);
+MODULE_PARM_DESC(qp_coalesc_timer_low, "MSI-X adaptive low coalesce time, range is 0 - 255");
-static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq)
-{
- struct hinic_rxq_stats rx_stats;
+static unsigned char qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH;
+module_param(qp_pending_limit_high, byte, 0444);
+MODULE_PARM_DESC(qp_pending_limit_high, "MSI-X adaptive high coalesce pending limit, range is 0 - 255");
- hinic_rxq_get_stats(rxq, &rx_stats);
+static unsigned char qp_coalesc_timer_high = HINIC_RX_COAL_TIME_HIGH;
+module_param(qp_coalesc_timer_high, byte, 0444);
+MODULE_PARM_DESC(qp_coalesc_timer_high, "MSI-X adaptive high coalesce time, range is 0 - 255");
- nic_rx_stats->bytes += rx_stats.bytes;
- nic_rx_stats->pkts += rx_stats.pkts;
- nic_rx_stats->errors += rx_stats.errors;
- nic_rx_stats->csum_errors += rx_stats.csum_errors;
- nic_rx_stats->other_errors += rx_stats.other_errors;
-}
+#define HINIC_NIC_DEV_WQ_NAME "hinic_nic_dev_wq"
-static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq)
-{
- struct hinic_txq_stats tx_stats;
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK | \
+ NETIF_MSG_RX_ERR)
- hinic_txq_get_stats(txq, &tx_stats);
+#define QID_MASKED(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1))
- nic_tx_stats->bytes += tx_stats.bytes;
- nic_tx_stats->pkts += tx_stats.pkts;
- nic_tx_stats->tx_busy += tx_stats.tx_busy;
- nic_tx_stats->tx_wake += tx_stats.tx_wake;
- nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
- nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
-}
+#define VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap))
-static void gather_nic_stats(struct hinic_dev *nic_dev,
- struct hinic_rxq_stats *nic_rx_stats,
- struct hinic_txq_stats *nic_tx_stats)
-{
- int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
+#define VLAN_BITMAP_BITS_SIZE(nic_dev) (VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8)
- for (i = 0; i < num_qps; i++)
- gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]);
+#define VLAN_NUM_BITMAPS(nic_dev) (VLAN_N_VID / \
+ VLAN_BITMAP_BITS_SIZE(nic_dev))
- for (i = 0; i < num_qps; i++)
- gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]);
-}
+#define VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \
+ VLAN_BITMAP_BYTE_SIZE(nic_dev))
-/**
- * create_txqs - Create the Logical Tx Queues of specific NIC device
- * @nic_dev: the specific NIC device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int create_txqs(struct hinic_dev *nic_dev)
-{
- int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
- struct net_device *netdev = nic_dev->netdev;
+#define VID_LINE(nic_dev, vid) ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev))
+#define VID_COL(nic_dev, vid) ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1))
- if (nic_dev->txqs)
- return -EINVAL;
+enum hinic_rx_mod {
+ HINIC_RX_MODE_UC = 1 << 0,
+ HINIC_RX_MODE_MC = 1 << 1,
+ HINIC_RX_MODE_BC = 1 << 2,
+ HINIC_RX_MODE_MC_ALL = 1 << 3,
+ HINIC_RX_MODE_PROMISC = 1 << 4,
+};
- nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs,
- sizeof(*nic_dev->txqs), GFP_KERNEL);
- if (!nic_dev->txqs)
- return -ENOMEM;
+enum hinic_rx_buff_len {
+ RX_BUFF_VALID_2KB = 2,
+ RX_BUFF_VALID_4KB = 4,
+ RX_BUFF_VALID_8KB = 8,
+ RX_BUFF_VALID_16KB = 16,
+};
- hinic_sq_dbgfs_init(nic_dev);
+#define HINIC_AVG_PKT_SMALL 256U
+#define HINIC_MODERATONE_DELAY HZ
+#define CONVERT_UNIT 1024
- for (i = 0; i < num_txqs; i++) {
- struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
+#define HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 2
+#define HINIC_VLAN_CLEAR_OFFLOAD (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \
+ NETIF_F_ALL_TSO)
- err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to init Txq\n");
- goto err_init_txq;
+int hinic_netdev_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *real_dev, *ret;
+ struct hinic_nic_dev *nic_dev;
+ u16 vlan_depth;
+
+ if (!is_vlan_dev(ndev))
+ return NOTIFY_DONE;
+
+ dev_hold(ndev);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ real_dev = vlan_dev_real_dev(ndev);
+ nic_dev = hinic_get_uld_dev_by_ifname(real_dev->name,
+ SERVICE_T_NIC);
+ if (!nic_dev)
+ goto out;
+
+ vlan_depth = 1;
+ ret = vlan_dev_priv(ndev)->real_dev;
+ while (is_vlan_dev(ret)) {
+ ret = vlan_dev_priv(ret)->real_dev;
+ vlan_depth++;
}
- err = hinic_sq_debug_add(nic_dev, i);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to add SQ%d debug\n", i);
- goto err_add_sq_dbg;
+ if (vlan_depth == HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) {
+ ndev->vlan_features &= (~HINIC_VLAN_CLEAR_OFFLOAD);
+ } else if (vlan_depth > HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) {
+ ndev->hw_features &= (~HINIC_VLAN_CLEAR_OFFLOAD);
+ ndev->features &= (~HINIC_VLAN_CLEAR_OFFLOAD);
}
- }
- return 0;
+ break;
-err_add_sq_dbg:
- hinic_clean_txq(&nic_dev->txqs[i]);
-err_init_txq:
- for (j = 0; j < i; j++) {
- hinic_sq_debug_rem(nic_dev->txqs[j].sq);
- hinic_clean_txq(&nic_dev->txqs[j]);
- }
+ default:
+ break;
+ };
- hinic_sq_dbgfs_uninit(nic_dev);
+out:
+ dev_put(ndev);
- devm_kfree(&netdev->dev, nic_dev->txqs);
- return err;
+ return NOTIFY_DONE;
}
-static void enable_txqs_napi(struct hinic_dev *nic_dev)
+/* used for netdev notifier register/unregister */
+DEFINE_MUTEX(g_hinic_netdev_notifiers_mutex);
+static int hinic_netdev_notifiers_ref_cnt;
+static struct notifier_block hinic_netdev_notifier = {
+ .notifier_call = hinic_netdev_event,
+};
+
+static void hinic_register_notifier(struct hinic_nic_dev *nic_dev)
{
- int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
- int i;
+ int err;
- for (i = 0; i < num_txqs; i++)
- napi_enable(&nic_dev->txqs[i].napi);
+ mutex_lock(&g_hinic_netdev_notifiers_mutex);
+ hinic_netdev_notifiers_ref_cnt++;
+ if (hinic_netdev_notifiers_ref_cnt == 1) {
+ err = register_netdevice_notifier(&hinic_netdev_notifier);
+ if (err) {
+ hinic_info(nic_dev, drv, "Register netdevice notifier failed, err: %d\n",
+ err);
+ hinic_netdev_notifiers_ref_cnt--;
+ }
+ }
+ mutex_unlock(&g_hinic_netdev_notifiers_mutex);
}
-static void disable_txqs_napi(struct hinic_dev *nic_dev)
+static void hinic_unregister_notifier(struct hinic_nic_dev *nic_dev)
{
- int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
- int i;
+ mutex_lock(&g_hinic_netdev_notifiers_mutex);
+ if (hinic_netdev_notifiers_ref_cnt == 1)
+ unregister_netdevice_notifier(&hinic_netdev_notifier);
- for (i = 0; i < num_txqs; i++)
- napi_disable(&nic_dev->txqs[i].napi);
+ if (hinic_netdev_notifiers_ref_cnt)
+ hinic_netdev_notifiers_ref_cnt--;
+ mutex_unlock(&g_hinic_netdev_notifiers_mutex);
}
-/**
- * free_txqs - Free the Logical Tx Queues of specific NIC device
- * @nic_dev: the specific NIC device
- **/
-static void free_txqs(struct hinic_dev *nic_dev)
+void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status)
{
- int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- if (!nic_dev->txqs)
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) ||
+ test_bit(HINIC_LP_TEST, &nic_dev->flags))
return;
- for (i = 0; i < num_txqs; i++) {
- hinic_sq_debug_rem(nic_dev->txqs[i].sq);
- hinic_clean_txq(&nic_dev->txqs[i]);
- }
+ if (status) {
+ if (netif_carrier_ok(netdev))
+ return;
- hinic_sq_dbgfs_uninit(nic_dev);
+ nic_dev->link_status = status;
+ netif_carrier_on(netdev);
+ nicif_info(nic_dev, link, netdev, "Link is up\n");
+ } else {
+ if (!netif_carrier_ok(netdev))
+ return;
- devm_kfree(&netdev->dev, nic_dev->txqs);
- nic_dev->txqs = NULL;
+ nic_dev->link_status = status;
+ netif_carrier_off(netdev);
+ nicif_info(nic_dev, link, netdev, "Link is down\n");
+ }
}
-/**
- * create_rxqs - Create the Logical Rx Queues of specific NIC device
- * @nic_dev: the specific NIC device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int create_rxqs(struct hinic_dev *nic_dev)
+static void hinic_heart_lost(struct hinic_nic_dev *nic_dev)
{
- int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
- struct net_device *netdev = nic_dev->netdev;
-
- if (nic_dev->rxqs)
- return -EINVAL;
-
- nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs,
- sizeof(*nic_dev->rxqs), GFP_KERNEL);
- if (!nic_dev->rxqs)
- return -ENOMEM;
-
- hinic_rq_dbgfs_init(nic_dev);
+ nic_dev->heart_status = false;
+}
- for (i = 0; i < num_rxqs; i++) {
- struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
+static int hinic_setup_qps_resources(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ int err;
- err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to init rxq\n");
- goto err_init_rxq;
- }
+ err = hinic_setup_all_tx_resources(netdev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to create Tx queues\n");
+ return err;
+ }
- err = hinic_rq_debug_add(nic_dev, i);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to add RQ%d debug\n", i);
- goto err_add_rq_dbg;
- }
+ err = hinic_setup_all_rx_resources(netdev, nic_dev->qps_irq_info);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to create Rx queues\n");
+ goto create_rxqs_err;
}
return 0;
-err_add_rq_dbg:
- hinic_clean_rxq(&nic_dev->rxqs[i]);
-err_init_rxq:
- for (j = 0; j < i; j++) {
- hinic_rq_debug_rem(nic_dev->rxqs[j].rq);
- hinic_clean_rxq(&nic_dev->rxqs[j]);
- }
-
- hinic_rq_dbgfs_uninit(nic_dev);
+create_rxqs_err:
+ hinic_free_all_tx_resources(netdev);
- devm_kfree(&netdev->dev, nic_dev->rxqs);
return err;
}
-/**
- * free_rxqs - Free the Logical Rx Queues of specific NIC device
- * @nic_dev: the specific NIC device
- **/
-static void free_rxqs(struct hinic_dev *nic_dev)
+static int hinic_configure(struct hinic_nic_dev *nic_dev)
{
- int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
+ int err;
- if (!nic_dev->rxqs)
- return;
-
- for (i = 0; i < num_rxqs; i++) {
- hinic_rq_debug_rem(nic_dev->rxqs[i].rq);
- hinic_clean_rxq(&nic_dev->rxqs[i]);
+ /* rx rss init */
+ err = hinic_rx_configure(netdev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to configure rx\n");
+ return err;
}
- hinic_rq_dbgfs_uninit(nic_dev);
-
- devm_kfree(&netdev->dev, nic_dev->rxqs);
- nic_dev->rxqs = NULL;
+ return 0;
}
-static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
+static void hinic_remove_configure(struct hinic_nic_dev *nic_dev)
{
- return hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
+ hinic_rx_remove_configure(nic_dev->netdev);
}
-static int hinic_rss_init(struct hinic_dev *nic_dev)
+static void hinic_setup_dcb_qps(struct hinic_nic_dev *nic_dev, u16 max_qps)
{
- u8 default_rss_key[HINIC_RSS_KEY_SIZE];
- u8 tmpl_idx = nic_dev->rss_tmpl_idx;
- u32 *indir_tbl;
- int err, i;
+ struct net_device *netdev = nic_dev->netdev;
+ u16 num_rss;
+ u8 num_tcs;
+ u8 i;
- indir_tbl = kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32), GFP_KERNEL);
- if (!indir_tbl)
- return -ENOMEM;
+ if (!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) ||
+ !test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
+ return;
- netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key));
- for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
- indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss);
+ num_tcs = (u8)netdev_get_num_tc(netdev);
+	/* For now, changing num_tcs is not supported */
+ if (num_tcs != nic_dev->max_cos || max_qps < num_tcs) {
+ nicif_err(nic_dev, drv, netdev, "Invalid num_tcs: %d or num_qps: %d, disable DCB\n",
+ num_tcs, max_qps);
+ netdev_reset_tc(netdev);
+ clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags);
+		/* if we can't enable rss or get enough num_qps,
+		 * sync the default configuration to hw
+		 */
+ hinic_configure_dcb(netdev);
+ } else {
+ /* We bind sq with cos but not tc */
+ num_rss = (u16)(max_qps / nic_dev->max_cos);
+ num_rss = min_t(u16, num_rss, nic_dev->rss_limit);
+ for (i = 0; i < nic_dev->max_cos; i++)
+ netdev_set_tc_queue(netdev, i, num_rss,
+ (u16)(num_rss * i));
+
+ nic_dev->num_rss = num_rss;
+ nic_dev->num_qps = (u16)(num_tcs * num_rss);
+ }
+}
- err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key);
- if (err)
- goto out;
+/* determine num_qps from rss_tmpl_id/irq_num/dcb_en */
+static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ u32 irq_size;
+ u16 resp_irq_num, i;
+ int err;
- err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl);
- if (err)
- goto out;
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ nic_dev->num_rss = nic_dev->rss_limit;
+ nic_dev->num_qps = nic_dev->rss_limit;
+ } else {
+ nic_dev->num_rss = 0;
+ nic_dev->num_qps = 1;
+ }
- err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type);
- if (err)
- goto out;
+ hinic_setup_dcb_qps(nic_dev, nic_dev->max_qps);
- err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx,
- nic_dev->rss_hash_engine);
- if (err)
- goto out;
+ irq_size = sizeof(*nic_dev->qps_irq_info) * nic_dev->num_qps;
+ if (!irq_size) {
+ nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size entries\n");
+ return -EINVAL;
+ }
+ nic_dev->qps_irq_info = kzalloc(irq_size, GFP_KERNEL);
+ if (!nic_dev->qps_irq_info)
+ return -ENOMEM;
- err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);
- if (err)
- goto out;
+ err = hinic_alloc_irqs(nic_dev->hwdev, SERVICE_T_NIC, nic_dev->num_qps,
+ nic_dev->qps_irq_info, &resp_irq_num);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to alloc irqs\n");
+ kfree(nic_dev->qps_irq_info);
+ return err;
+ }
-out:
- kfree(indir_tbl);
- return err;
-}
+	/* fewer irqs available than requested rq number, adjust rq number down */
+ if (resp_irq_num < nic_dev->num_qps) {
+ nic_dev->num_qps = resp_irq_num;
+ nic_dev->num_rss = nic_dev->num_qps;
+ hinic_setup_dcb_qps(nic_dev, nic_dev->num_qps);
+ nicif_warn(nic_dev, drv, netdev,
+ "Can not get enough irqs, adjust num_qps to %d\n",
+ nic_dev->num_qps);
+		/* after adjusting num_qps, free the remaining irqs */
+ for (i = nic_dev->num_qps; i < resp_irq_num; i++)
+ hinic_free_irq(nic_dev->hwdev, SERVICE_T_NIC,
+ nic_dev->qps_irq_info[i].irq_id);
+ }
-static void hinic_rss_deinit(struct hinic_dev *nic_dev)
-{
- hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx);
+ nicif_info(nic_dev, drv, netdev, "Finally num_qps: %d, num_rss: %d\n",
+ nic_dev->num_qps, nic_dev->num_rss);
+
+ return 0;
}
-static void hinic_init_rss_parameters(struct hinic_dev *nic_dev)
+static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)
{
- nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
- nic_dev->rss_type.tcp_ipv6_ext = 1;
- nic_dev->rss_type.ipv6_ext = 1;
- nic_dev->rss_type.tcp_ipv6 = 1;
- nic_dev->rss_type.ipv6 = 1;
- nic_dev->rss_type.tcp_ipv4 = 1;
- nic_dev->rss_type.ipv4 = 1;
- nic_dev->rss_type.udp_ipv6 = 1;
- nic_dev->rss_type.udp_ipv4 = 1;
+ u16 i;
+
+ for (i = 0; i < nic_dev->num_qps; i++)
+ hinic_free_irq(nic_dev->hwdev, SERVICE_T_NIC,
+ nic_dev->qps_irq_info[i].irq_id);
+
+ kfree(nic_dev->qps_irq_info);
}
-static void hinic_enable_rss(struct hinic_dev *nic_dev)
+static int hinic_poll(struct napi_struct *napi, int budget)
{
- struct net_device *netdev = nic_dev->netdev;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int i, node, err = 0;
- u16 num_cpus = 0;
+ int tx_pkts, rx_pkts;
+ struct hinic_irq *irq_cfg = container_of(napi, struct hinic_irq, napi);
+ struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
- if (nic_dev->max_qps <= 1) {
- nic_dev->flags &= ~HINIC_RSS_ENABLE;
- nic_dev->rss_limit = nic_dev->max_qps;
- nic_dev->num_qps = nic_dev->max_qps;
- nic_dev->num_rss = nic_dev->max_qps;
+ rx_pkts = hinic_rx_poll(irq_cfg->rxq, budget);
- return;
- }
+ tx_pkts = hinic_tx_poll(irq_cfg->txq, budget);
- err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
- nic_dev->flags &= ~HINIC_RSS_ENABLE;
- nic_dev->max_qps = 1;
- nic_dev->rss_limit = nic_dev->max_qps;
- nic_dev->num_qps = nic_dev->max_qps;
- nic_dev->num_rss = nic_dev->max_qps;
+ if (tx_pkts >= budget || rx_pkts >= budget)
+ return budget;
- return;
+ set_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag);
+ rx_pkts += hinic_rx_poll(irq_cfg->rxq, budget - rx_pkts);
+ tx_pkts += hinic_tx_poll(irq_cfg->txq, budget - tx_pkts);
+ if (rx_pkts >= budget || tx_pkts >= budget) {
+ clear_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag);
+ return budget;
}
- nic_dev->flags |= HINIC_RSS_ENABLE;
-
- for (i = 0; i < num_online_cpus(); i++) {
- node = cpu_to_node(i);
- if (node == dev_to_node(&pdev->dev))
- num_cpus++;
+ napi_complete(napi);
+
+ if (!test_and_set_bit(HINIC_INTR_ON, &irq_cfg->intr_flag)) {
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ hinic_set_msix_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC_MSIX_ENABLE);
+ else if (!nic_dev->in_vm &&
+ (hinic_get_func_mode(nic_dev->hwdev) ==
+ FUNC_MOD_NORMAL_HOST))
+ enable_irq(irq_cfg->irq_id);
}
- if (!num_cpus)
- num_cpus = num_online_cpus();
-
- nic_dev->num_qps = hinic_hwdev_num_qps(hwdev);
- nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
+ return max(tx_pkts, rx_pkts);
+}
- nic_dev->rss_limit = nic_dev->num_qps;
- nic_dev->num_rss = nic_dev->num_qps;
+static void qp_add_napi(struct hinic_irq *irq_cfg)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
- hinic_init_rss_parameters(nic_dev);
- err = hinic_rss_init(nic_dev);
- if (err)
- netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
+ netif_napi_add_tx_weight(nic_dev->netdev, &irq_cfg->napi,
+ hinic_poll, nic_dev->poll_weight);
+ napi_enable(&irq_cfg->napi);
}
-int hinic_open(struct net_device *netdev)
+static void qp_del_napi(struct hinic_irq *irq_cfg)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- enum hinic_port_link_state link_state;
- int err, ret;
+ napi_disable(&irq_cfg->napi);
+ netif_napi_del(&irq_cfg->napi);
+}
- if (!(nic_dev->flags & HINIC_INTF_UP)) {
- err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth,
- nic_dev->rq_depth);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed - HW interface up\n");
- return err;
+static irqreturn_t qp_irq(int irq, void *data)
+{
+ struct hinic_irq *irq_cfg = (struct hinic_irq *)data;
+ struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+ u16 msix_entry_idx = irq_cfg->msix_entry_idx;
+
+ if (napi_schedule_prep(&irq_cfg->napi)) {
+ if (l2nic_interrupt_switch) {
+ /* Disable the interrupt until napi will be completed */
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+ hinic_set_msix_state(nic_dev->hwdev,
+ msix_entry_idx,
+ HINIC_MSIX_DISABLE);
+ } else if (!nic_dev->in_vm &&
+ (hinic_get_func_mode(nic_dev->hwdev) ==
+ FUNC_MOD_NORMAL_HOST)) {
+ disable_irq_nosync(irq_cfg->irq_id);
+ }
+
+ clear_bit(HINIC_INTR_ON, &irq_cfg->intr_flag);
}
- }
- err = create_txqs(nic_dev);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to create Tx queues\n");
- goto err_create_txqs;
- }
+ hinic_misx_intr_clear_resend_bit(nic_dev->hwdev,
+ msix_entry_idx, 1);
- enable_txqs_napi(nic_dev);
+ clear_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag);
- err = create_rxqs(nic_dev);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to create Rx queues\n");
- goto err_create_rxqs;
+ __napi_schedule(&irq_cfg->napi);
+ } else if (!test_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag)) {
+ hinic_misx_intr_clear_resend_bit(nic_dev->hwdev, msix_entry_idx,
+ 1);
}
- hinic_enable_rss(nic_dev);
-
- err = hinic_configure_max_qnum(nic_dev);
- if (err) {
- netif_err(nic_dev, drv, nic_dev->netdev,
- "Failed to configure the maximum number of queues\n");
- goto err_port_state;
- }
+ return IRQ_HANDLED;
+}
- netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
- netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);
+static int hinic_request_irq(struct hinic_irq *irq_cfg, u16 q_id)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+ struct nic_interrupt_info info = {0};
+ int err;
- err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE);
+ qp_add_napi(irq_cfg);
+
+ info.msix_index = irq_cfg->msix_entry_idx;
+ info.lli_set = 0;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limt = nic_dev->intr_coalesce[q_id].pending_limt;
+ info.coalesc_timer_cfg =
+ nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
+ info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
+ nic_dev->rxqs[q_id].last_coalesc_timer_cfg =
+ nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
+ nic_dev->rxqs[q_id].last_pending_limt =
+ nic_dev->intr_coalesce[q_id].pending_limt;
+ err = hinic_set_interrupt_cfg(nic_dev->hwdev, info);
if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to set port state\n");
- goto err_port_state;
+ nicif_err(nic_dev, drv, irq_cfg->netdev,
+ "Failed to set RX interrupt coalescing attribute\n");
+ qp_del_napi(irq_cfg);
+ return err;
}
- err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE);
+ err = request_irq(irq_cfg->irq_id, &qp_irq, 0,
+ irq_cfg->irq_name, irq_cfg);
if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to set func port state\n");
- goto err_func_port_state;
+ nicif_err(nic_dev, drv, irq_cfg->netdev, "Failed to request Rx irq\n");
+ qp_del_napi(irq_cfg);
+ return err;
}
- down(&nic_dev->mgmt_lock);
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask);
- err = hinic_port_link_state(nic_dev, &link_state);
- if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to get link state\n");
- goto err_port_link;
- }
+ return 0;
+}
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state);
+static int set_interrupt_moder(struct hinic_nic_dev *nic_dev, u16 q_id,
+ u8 coalesc_timer_cfg, u8 pending_limt)
+{
+ struct nic_interrupt_info interrupt_info = {0};
+ int err;
- if (link_state == HINIC_LINK_STATE_UP) {
- nic_dev->flags |= HINIC_LINK_UP;
- nic_dev->cable_unplugged = false;
- nic_dev->module_unrecognized = false;
+ if (coalesc_timer_cfg == nic_dev->rxqs[q_id].last_coalesc_timer_cfg &&
+ pending_limt == nic_dev->rxqs[q_id].last_pending_limt)
+ return 0;
+
+	/* netdev not running or qp not in use,
+	 * no need to write the coalesce settings to hw
+	 */
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) ||
+ q_id >= nic_dev->num_qps)
+ return 0;
+
+ interrupt_info.lli_set = 0;
+ interrupt_info.interrupt_coalesc_set = 1;
+ interrupt_info.coalesc_timer_cfg = coalesc_timer_cfg;
+ interrupt_info.pending_limt = pending_limt;
+ interrupt_info.msix_index = nic_dev->irq_cfg[q_id].msix_entry_idx;
+ interrupt_info.resend_timer_cfg =
+ nic_dev->intr_coalesce[q_id].resend_timer_cfg;
+
+ err = hinic_set_interrupt_cfg(nic_dev->hwdev, interrupt_info);
+ if (err) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed modifying moderation for Queue: %d\n", q_id);
+ } else {
+ nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg;
+ nic_dev->rxqs[q_id].last_pending_limt = pending_limt;
}
- nic_dev->flags |= HINIC_INTF_UP;
+ return err;
+}
- if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
- (HINIC_LINK_UP | HINIC_INTF_UP)) {
- netif_info(nic_dev, drv, netdev, "link + intf UP\n");
- netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
+static void __calc_coal_para(struct hinic_nic_dev *nic_dev,
+ struct hinic_intr_coal_info *q_coal, u64 rate,
+ u8 *coalesc_timer_cfg, u8 *pending_limt)
+{
+ if (rate < q_coal->pkt_rate_low) {
+ *coalesc_timer_cfg = q_coal->rx_usecs_low;
+ *pending_limt = q_coal->rx_pending_limt_low;
+ } else if (rate > q_coal->pkt_rate_high) {
+ *coalesc_timer_cfg = q_coal->rx_usecs_high;
+ *pending_limt = q_coal->rx_pending_limt_high;
+ } else {
+ *coalesc_timer_cfg =
+ (u8)((rate - q_coal->pkt_rate_low) *
+ (q_coal->rx_usecs_high -
+ q_coal->rx_usecs_low) /
+ (q_coal->pkt_rate_high -
+ q_coal->pkt_rate_low) +
+ q_coal->rx_usecs_low);
+ if (nic_dev->in_vm)
+ *pending_limt = (u8)((rate - q_coal->pkt_rate_low) *
+ (q_coal->rx_pending_limt_high -
+ q_coal->rx_pending_limt_low) /
+ (q_coal->pkt_rate_high -
+ q_coal->pkt_rate_low) +
+ q_coal->rx_pending_limt_low);
+ else
+ *pending_limt = q_coal->rx_pending_limt_low;
}
-
- up(&nic_dev->mgmt_lock);
-
- netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n");
- return 0;
-
-err_port_link:
- up(&nic_dev->mgmt_lock);
- ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
- if (ret)
- netif_warn(nic_dev, drv, netdev,
- "Failed to revert func port state\n");
-
-err_func_port_state:
- ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
- if (ret)
- netif_warn(nic_dev, drv, netdev,
- "Failed to revert port state\n");
-err_port_state:
- free_rxqs(nic_dev);
- if (nic_dev->flags & HINIC_RSS_ENABLE) {
- hinic_rss_deinit(nic_dev);
- hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
- }
-
-err_create_rxqs:
- disable_txqs_napi(nic_dev);
- free_txqs(nic_dev);
-
-err_create_txqs:
- if (!(nic_dev->flags & HINIC_INTF_UP))
- hinic_hwdev_ifdown(nic_dev->hwdev);
- return err;
}
-int hinic_close(struct net_device *netdev)
+static void update_queue_coal(struct hinic_nic_dev *nic_dev, u16 qid,
+ u64 rate, u64 avg_pkt_size, u64 tx_rate)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- unsigned int flags;
+ struct hinic_intr_coal_info *q_coal;
+ u8 coalesc_timer_cfg, pending_limt;
- /* Disable txq napi firstly to aviod rewaking txq in free_tx_poll */
- disable_txqs_napi(nic_dev);
+ q_coal = &nic_dev->intr_coalesce[qid];
- down(&nic_dev->mgmt_lock);
+ if ((rate > HINIC_RX_RATE_THRESH &&
+ avg_pkt_size > HINIC_AVG_PKT_SMALL) ||
+ (nic_dev->in_vm && rate > HINIC_RX_RATE_THRESH)) {
+ __calc_coal_para(nic_dev, q_coal, rate,
+ &coalesc_timer_cfg, &pending_limt);
+ } else {
+ coalesc_timer_cfg = HINIC_LOWEST_LATENCY;
+ pending_limt = q_coal->rx_pending_limt_low;
+ }
- flags = nic_dev->flags;
- nic_dev->flags &= ~HINIC_INTF_UP;
+ set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg,
+ pending_limt);
+}
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
+#define SDI_VM_PPS_3W 30000
+#define SDI_VM_PPS_5W 50000
- up(&nic_dev->mgmt_lock);
+#define SDI_VM_BPS_100MB 12500000
+#define SDI_VM_BPS_1GB 125000000
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
+static void update_queue_coal_sdi_vm(struct hinic_nic_dev *nic_dev,
+ u16 qid, u64 rx_pps, u64 rx_bps,
+ u64 tx_pps, u64 tx_bps)
+{
+ struct hinic_intr_coal_info *q_coal = NULL;
+ u8 coalesc_timer_cfg, pending_limt;
+
+ q_coal = &nic_dev->intr_coalesce[qid];
+ if (qp_coalesc_use_drv_params_switch == 0) {
+ if (rx_pps < SDI_VM_PPS_3W &&
+ tx_pps < SDI_VM_PPS_3W &&
+ rx_bps < SDI_VM_BPS_100MB &&
+ tx_bps < SDI_VM_BPS_100MB) {
+ set_interrupt_moder(nic_dev, qid, 0, 0);
+ } else if (tx_pps > SDI_VM_PPS_3W &&
+ tx_pps < SDI_VM_PPS_5W &&
+ tx_bps > SDI_VM_BPS_1GB) {
+ set_interrupt_moder(nic_dev, qid, 7, 7);
+ } else {
+ __calc_coal_para(nic_dev, q_coal, rx_pps,
+ &coalesc_timer_cfg,
+ &pending_limt);
+ set_interrupt_moder(nic_dev, qid,
+ coalesc_timer_cfg,
+ pending_limt);
+ }
+ } else {
+ __calc_coal_para(nic_dev, q_coal, rx_pps,
+ &coalesc_timer_cfg,
+ &pending_limt);
+ set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg,
+ pending_limt);
+ }
+}
- hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
+static void hinic_auto_moderation_work(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct hinic_nic_dev *nic_dev = container_of(delay,
+ struct hinic_nic_dev,
+ moderation_task);
+ unsigned long period = (unsigned long)(jiffies -
+ nic_dev->last_moder_jiffies);
+ u64 rx_packets, rx_bytes, rx_pkt_diff, rate, avg_pkt_size;
+ u64 tx_packets, tx_bytes, tx_pkt_diff, tx_rate, rx_bps, tx_bps;
+ u16 qid;
+
+ if (!test_bit(HINIC_INTF_UP, &nic_dev->flags))
+ return;
- hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
+ queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task,
+ HINIC_MODERATONE_DELAY);
- if (nic_dev->flags & HINIC_RSS_ENABLE) {
- hinic_rss_deinit(nic_dev);
- hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
- }
+ if (!nic_dev->adaptive_rx_coal || !period)
+ return;
- free_rxqs(nic_dev);
- free_txqs(nic_dev);
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ rx_packets = nic_dev->rxqs[qid].rxq_stats.packets;
+ rx_bytes = nic_dev->rxqs[qid].rxq_stats.bytes;
+ tx_packets = nic_dev->txqs[qid].txq_stats.packets;
+ tx_bytes = nic_dev->txqs[qid].txq_stats.bytes;
+
+ rx_pkt_diff =
+ rx_packets - nic_dev->rxqs[qid].last_moder_packets;
+ avg_pkt_size = rx_pkt_diff ?
+ ((unsigned long)(rx_bytes -
+ nic_dev->rxqs[qid].last_moder_bytes)) /
+ rx_pkt_diff : 0;
+
+ rate = rx_pkt_diff * HZ / period;
+ tx_pkt_diff =
+ tx_packets - nic_dev->txqs[qid].last_moder_packets;
+ tx_rate = tx_pkt_diff * HZ / period;
+
+ rx_bps = (unsigned long)(rx_bytes -
+ nic_dev->rxqs[qid].last_moder_bytes)
+ * HZ / period;
+ tx_bps = (unsigned long)(tx_bytes -
+ nic_dev->txqs[qid].last_moder_bytes)
+ * HZ / period;
+ if ((nic_dev->is_vm_slave && nic_dev->in_vm) ||
+ nic_dev->is_bm_slave) {
+ update_queue_coal_sdi_vm(nic_dev, qid, rate, rx_bps,
+ tx_rate, tx_bps);
+ } else {
+ update_queue_coal(nic_dev, qid, rate, avg_pkt_size,
+ tx_rate);
+ }
- if (flags & HINIC_INTF_UP)
- hinic_hwdev_ifdown(nic_dev->hwdev);
+ nic_dev->rxqs[qid].last_moder_packets = rx_packets;
+ nic_dev->rxqs[qid].last_moder_bytes = rx_bytes;
+ nic_dev->txqs[qid].last_moder_packets = tx_packets;
+ nic_dev->txqs[qid].last_moder_bytes = tx_bytes;
+ }
- netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n");
- return 0;
+ nic_dev->last_moder_jiffies = jiffies;
}
-static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
+static void hinic_release_irq(struct hinic_irq *irq_cfg)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err;
-
- netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu);
-
- err = hinic_port_set_mtu(nic_dev, new_mtu);
- if (err)
- netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
- else
- netdev->mtu = new_mtu;
-
- return err;
+ irq_set_affinity_hint(irq_cfg->irq_id, NULL);
+ synchronize_irq(irq_cfg->irq_id);
+ free_irq(irq_cfg->irq_id, irq_cfg);
+ qp_del_napi(irq_cfg);
}
-/**
- * change_mac_addr - change the main mac address of network device
- * @netdev: network device
- * @addr: mac address to set
- *
- * Return 0 - Success, negative - Failure
- **/
-static int change_mac_addr(struct net_device *netdev, const u8 *addr)
+static int hinic_qps_irq_init(struct hinic_nic_dev *nic_dev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u16 vid = 0;
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct irq_info *qp_irq_info;
+ struct hinic_irq *irq_cfg;
+ u16 q_id, i;
+ u32 local_cpu;
int err;
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
-
- netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ nic_dev->irq_cfg = kcalloc(nic_dev->num_qps, sizeof(*nic_dev->irq_cfg),
+ GFP_KERNEL);
+ if (!nic_dev->irq_cfg)
+ return -ENOMEM;
- down(&nic_dev->mgmt_lock);
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ qp_irq_info = &nic_dev->qps_irq_info[q_id];
+ irq_cfg = &nic_dev->irq_cfg[q_id];
+
+ irq_cfg->irq_id = qp_irq_info->irq_id;
+ irq_cfg->msix_entry_idx = qp_irq_info->msix_entry_idx;
+ irq_cfg->netdev = nic_dev->netdev;
+ irq_cfg->txq = &nic_dev->txqs[q_id];
+ irq_cfg->rxq = &nic_dev->rxqs[q_id];
+ nic_dev->rxqs[q_id].irq_cfg = irq_cfg;
+
+ if (nic_dev->force_affinity) {
+ irq_cfg->affinity_mask = nic_dev->affinity_mask;
+ } else {
+ local_cpu =
+ cpumask_local_spread(q_id,
+ dev_to_node(&pdev->dev));
+ cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask);
+ }
- do {
- err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to delete mac\n");
- break;
+ err = snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name),
+ "%s_qp%d", nic_dev->netdev->name, q_id);
+ if (err <= 0 || err >= (int)sizeof(irq_cfg->irq_name)) {
+ nic_err(&pdev->dev,
+ "Failed snprintf irq_name, function return(%d) and dest_len(%d)\n",
+ err, (int)sizeof(irq_cfg->irq_name));
+ goto req_tx_irq_err;
}
- err = hinic_port_add_mac(nic_dev, addr, vid);
+ set_bit(HINIC_INTR_ON, &irq_cfg->intr_flag);
+
+ err = hinic_request_irq(irq_cfg, q_id);
if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
- break;
+ nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to request Rx irq\n");
+ goto req_tx_irq_err;
}
- vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
- } while (vid != VLAN_N_VID);
+ hinic_set_msix_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC_MSIX_ENABLE);
+ }
- up(&nic_dev->mgmt_lock);
- return err;
-}
+ INIT_DELAYED_WORK(&nic_dev->moderation_task,
+ hinic_auto_moderation_work);
-static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
-{
- unsigned char new_mac[ETH_ALEN];
- struct sockaddr *saddr = addr;
- int err;
+ return 0;
- memcpy(new_mac, saddr->sa_data, ETH_ALEN);
+req_tx_irq_err:
+ for (i = 0; i < q_id; i++) {
+ hinic_set_msix_state(nic_dev->hwdev,
+ nic_dev->irq_cfg[i].msix_entry_idx,
+ HINIC_MSIX_DISABLE);
+ hinic_release_irq(&nic_dev->irq_cfg[i]);
+ }
- err = change_mac_addr(netdev, new_mac);
- if (!err)
- eth_hw_addr_set(netdev, new_mac);
+ kfree(nic_dev->irq_cfg);
return err;
}
-/**
- * add_mac_addr - add mac address to network device
- * @netdev: network device
- * @addr: mac address to add
- *
- * Return 0 - Success, negative - Failure
- **/
-static int add_mac_addr(struct net_device *netdev, const u8 *addr)
+static void hinic_qps_irq_deinit(struct hinic_nic_dev *nic_dev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u16 vid = 0;
- int err;
-
- netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
-
- down(&nic_dev->mgmt_lock);
-
- do {
- err = hinic_port_add_mac(nic_dev, addr, vid);
- if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
- break;
- }
+ u16 q_id;
- vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
- } while (vid != VLAN_N_VID);
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ hinic_set_msix_state(nic_dev->hwdev,
+ nic_dev->irq_cfg[q_id].msix_entry_idx,
+ HINIC_MSIX_DISABLE);
+ hinic_release_irq(&nic_dev->irq_cfg[q_id]);
+ }
- up(&nic_dev->mgmt_lock);
- return err;
+ kfree(nic_dev->irq_cfg);
}
-/**
- * remove_mac_addr - remove mac address from network device
- * @netdev: network device
- * @addr: mac address to remove
- *
- * Return 0 - Success, negative - Failure
- **/
-static int remove_mac_addr(struct net_device *netdev, const u8 *addr)
+int hinic_force_port_disable(struct hinic_nic_dev *nic_dev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u16 vid = 0;
int err;
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
-
- netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ down(&nic_dev->port_state_sem);
- down(&nic_dev->mgmt_lock);
-
- do {
- err = hinic_port_del_mac(nic_dev, addr, vid);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to delete mac\n");
- break;
- }
+ err = hinic_set_port_enable(nic_dev->hwdev, false);
+ if (!err)
+ nic_dev->force_port_disable = true;
- vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
- } while (vid != VLAN_N_VID);
+ up(&nic_dev->port_state_sem);
- up(&nic_dev->mgmt_lock);
return err;
}
-static int hinic_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int ret, err;
-
- netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid);
-
- down(&nic_dev->mgmt_lock);
-
- err = hinic_port_add_vlan(nic_dev, vid);
- if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to add vlan\n");
- goto err_vlan_add;
- }
-
- err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
- if (err && err != HINIC_PF_SET_VF_ALREADY) {
- netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
- goto err_add_mac;
- }
+ int err = 0;
- bitmap_set(nic_dev->vlan_bitmap, vid, 1);
+ down(&nic_dev->port_state_sem);
- up(&nic_dev->mgmt_lock);
- return 0;
+ nic_dev->force_port_disable = false;
+ err = hinic_set_port_enable(nic_dev->hwdev, enable);
-err_add_mac:
- ret = hinic_port_del_vlan(nic_dev, vid);
- if (ret)
- netif_err(nic_dev, drv, netdev,
- "Failed to revert by removing vlan\n");
+ up(&nic_dev->port_state_sem);
-err_vlan_add:
- up(&nic_dev->mgmt_lock);
return err;
}
-static int hinic_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+int hinic_maybe_set_port_state(struct hinic_nic_dev *nic_dev, bool enable)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
int err;
- netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid);
-
- down(&nic_dev->mgmt_lock);
+ down(&nic_dev->port_state_sem);
- err = hinic_port_del_vlan(nic_dev, vid);
- if (err) {
- netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
- goto err_del_vlan;
+	/* Do nothing when the port is force-disabled:
+	 * the port was disabled via hinic_force_port_disable() and
+	 * must not be re-enabled while force mode is active
+	 */
+ if (nic_dev->force_port_disable) {
+ up(&nic_dev->port_state_sem);
+ return 0;
}
- bitmap_clear(nic_dev->vlan_bitmap, vid, 1);
+ err = hinic_set_port_enable(nic_dev->hwdev, enable);
- up(&nic_dev->mgmt_lock);
- return 0;
+ up(&nic_dev->port_state_sem);
-err_del_vlan:
- up(&nic_dev->mgmt_lock);
return err;
}
-static void set_rx_mode(struct work_struct *work)
+static void hinic_print_link_message(struct hinic_nic_dev *nic_dev,
+ u8 link_status)
{
- struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
- struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
+ if (nic_dev->link_status == link_status)
+ return;
- hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);
+ nic_dev->link_status = link_status;
- __dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
- __dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
+ nicif_info(nic_dev, link, nic_dev->netdev, "Link is %s\n",
+ (link_status ? "up" : "down"));
}
-static void hinic_set_rx_mode(struct net_device *netdev)
+int hinic_open(struct net_device *netdev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rx_mode_work *rx_mode_work;
- u32 rx_mode;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 link_status = 0;
+ int err;
+
+ if (test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+ nicif_info(nic_dev, drv, netdev, "Netdev already open, do nothing\n");
+ return 0;
+ }
+
+ err = hinic_setup_num_qps(nic_dev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to setup num_qps\n");
+ return err;
+ }
+
+ err = hinic_create_qps(nic_dev->hwdev, nic_dev->num_qps,
+ nic_dev->sq_depth, nic_dev->rq_depth,
+ nic_dev->qps_irq_info, HINIC_MAX_SQ_BUFDESCS);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to create queue pairs\n");
+ goto create_qps_err;
+ }
+
+ err = hinic_setup_qps_resources(nic_dev);
+ if (err)
+ goto setup_qps_resources_err;
+
+ err = hinic_init_qp_ctxts(nic_dev->hwdev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to init qp ctxts\n");
+ goto init_qp_ctxts_err;
+ }
+
+ err = hinic_set_port_mtu(nic_dev->hwdev, netdev->mtu);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n");
+ goto mtu_err;
+ }
+
+ err = hinic_configure(nic_dev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to configure txrx\n");
+ goto cfg_err;
+ }
+
+ err = hinic_qps_irq_init(nic_dev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to qps irq init\n");
+ goto qps_irqs_init_err;
+ }
+
+ err = hinic_set_vport_enable(nic_dev->hwdev, true);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to enable vport\n");
+ goto vport_enable_err;
+ }
+
+ err = hinic_maybe_set_port_state(nic_dev, true);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to enable port\n");
+ goto port_enable_err;
+ }
+
+ set_bit(HINIC_INTF_UP, &nic_dev->flags);
+
+ netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
+ netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);
+ netif_tx_wake_all_queues(netdev);
+
+ queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task,
+ HINIC_MODERATONE_DELAY);
+
+ err = hinic_get_link_state(nic_dev->hwdev, &link_status);
+ if (!err && link_status) {
+ hinic_update_pf_bw(nic_dev->hwdev);
+ netif_carrier_on(netdev);
+ }
+
+ hinic_print_link_message(nic_dev, link_status);
+
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_status);
+
+ nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n");
+
+ return 0;
+
+port_enable_err:
+ hinic_set_vport_enable(nic_dev->hwdev, false);
+
+vport_enable_err:
+ hinic_flush_sq_res(nic_dev->hwdev);
+	/* After vport is disabled for 100ms, no packets will be sent to host */
+ msleep(100);
+ hinic_qps_irq_deinit(nic_dev);
+
+qps_irqs_init_err:
+ hinic_remove_configure(nic_dev);
+
+cfg_err:
+mtu_err:
+ hinic_free_qp_ctxts(nic_dev->hwdev);
+
+init_qp_ctxts_err:
+ hinic_free_all_rx_resources(netdev);
+ hinic_free_all_tx_resources(netdev);
+
+setup_qps_resources_err:
+ hinic_free_qps(nic_dev->hwdev);
+
+create_qps_err:
+ hinic_destroy_num_qps(nic_dev);
+
+ return err;
+}
+
+/* ndo_stop: quiesce the data path, tear down queue irqs and free QP
+ * resources.  Idempotent - returns 0 immediately if already down.
+ */
+int hinic_close(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	/* HINIC_INTF_UP is the open/close latch shared with hinic_open() */
+	if (!test_and_clear_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_info(nic_dev, drv, netdev, "Netdev already close, do nothing\n");
+		return 0;
+	}
+
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	cancel_delayed_work_sync(&nic_dev->moderation_task);
+
+	/* only touch hw if the chip is still present (surprise-removal safe) */
+	if (hinic_get_chip_present_flag(nic_dev->hwdev)) {
+		if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+			hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
+
+		hinic_maybe_set_port_state(nic_dev, false);
+
+		hinic_set_vport_enable(nic_dev->hwdev, false);
-	rx_mode_work = &nic_dev->rx_mode_work;
+		hinic_flush_txqs(netdev);
+		hinic_flush_sq_res(nic_dev->hwdev);
+		/* After set vport disable 100ms,
+		 * no packets will be send to host
+		 */
+		msleep(100);
+	}
+
+	hinic_qps_irq_deinit(nic_dev);
+	hinic_remove_configure(nic_dev);
+
+	if (hinic_get_chip_present_flag(nic_dev->hwdev))
+		hinic_free_qp_ctxts(nic_dev->hwdev);
+
+	/* serialize resource teardown against concurrent queue reconfig */
+	mutex_lock(&nic_dev->nic_mutex);
+	hinic_free_all_rx_resources(netdev);
+
+	hinic_free_all_tx_resources(netdev);
+
+	hinic_free_qps(nic_dev->hwdev);
+
+	hinic_destroy_num_qps(nic_dev);
+	mutex_unlock(&nic_dev->nic_mutex);
+
+	nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n");
+
+	return 0;
+}
+
+/* Software Toeplitz hash over the 4-tuple (SIP, DIP, SPORT, DPORT);
+ * rss_key must provide at least 4 u32 words of the RSS hash key.
+ */
+static inline u32 calc_toeplitz_rss(u32 sip, u32 dip, u32 sport, u32 dport,
+				    const u32 *rss_key)
+{
+	u32 i, port, rss = 0;
+
+	port = (sport << 16) | dport;
+
+	/* The key - SIP, DIP, SPORT, DPORT */
+	for (i = 0; i < 32; i++)
+		if (sip & ((u32)1 << (u32)(31 - i)))
+			/* u64 cast keeps >> (32 - i) defined when i == 0 */
+			rss ^= (rss_key[0] << i) |
+				(u32)((u64)rss_key[1] >> (32 - i));
+
+	for (i = 0; i < 32; i++)
+		if (dip & ((u32)1 << (u32)(31 - i)))
+			rss ^= (rss_key[1] << i) |
+				(u32)((u64)rss_key[2] >> (32 - i));
-	rx_mode = HINIC_RX_MODE_UC |
-		  HINIC_RX_MODE_MC |
-		  HINIC_RX_MODE_BC;
+	for (i = 0; i < 32; i++)
+		if (port & ((u32)1 << (u32)(31 - i)))
+			rss ^= (rss_key[2] << i) |
+				(u32)((u64)rss_key[3] >> (32 - i));
-	if (netdev->flags & IFF_PROMISC) {
-		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
-			rx_mode |= HINIC_RX_MODE_PROMISC;
-	} else if (netdev->flags & IFF_ALLMULTI) {
-		rx_mode |= HINIC_RX_MODE_MC_ALL;
+	return rss;
+}
+
+/* Pick a tx queue by recomputing the Toeplitz hash the hw applies on rx,
+ * so tx lands on the queue matching the flow's rx queue.  Only IPv4
+ * TCP/UDP gets a real hash; everything else falls through with hash 0.
+ */
+static u16 select_queue_by_toeplitz(struct net_device *dev,
+				    struct sk_buff *skb,
+				    unsigned int num_tx_queues)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(dev);
+	struct tcphdr *tcphdr;
+	struct iphdr *iphdr;
+	u32 hash = 0;
+
+	/* reflect a recorded rx queue straight back, folded into range */
+	if (skb_rx_queue_recorded(skb)) {
+		hash = skb_get_rx_queue(skb);
+		while (unlikely(hash >= num_tx_queues))
+			hash -= num_tx_queues;
+		return (u16)hash;
+	}
+
+	/*lint -save -e778*/
+	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
+		iphdr = ip_hdr(skb);
+		if (iphdr->protocol == IPPROTO_UDP ||
+		    iphdr->protocol == IPPROTO_TCP) {
+			/* UDP shares the dest/source port layout of tcphdr */
+			tcphdr = tcp_hdr(skb);
+			hash = calc_toeplitz_rss(ntohl(iphdr->daddr),
+						 ntohl(iphdr->saddr),
+						 ntohs(tcphdr->dest),
+						 ntohs(tcphdr->source),
+						 nic_dev->rss_hkey_user_be);
+		}
	}
+	/*lint -restore*/
+
+	/* NOTE(review): assumes rss_indir_user has >= 256 entries - confirm */
+	return (u16)nic_dev->rss_indir_user[hash & 0xFF];
+}
+
+/* ndo_select_queue: honour the user RSS indirection (Toeplitz engine +
+ * same rx/tx affinity) when configured, otherwise kernel default pick.
+ */
+static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb,
+			      struct net_device *sb_dev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	/* derive skb priority from the vlan PCP bits */
+	skb->priority = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+	if (netdev_get_num_tc(netdev) || !nic_dev->rss_hkey_user_be)
+		goto fallback;
+
+	if (nic_dev->rss_hash_engine == HINIC_RSS_HASH_ENGINE_TYPE_TOEP &&
+	    test_bit(HINIC_SAME_RXTX, &nic_dev->flags))
+		return select_queue_by_toeplitz(netdev, skb,
+						netdev->real_num_tx_queues);
+
+fallback:
+	return netdev_pick_tx(netdev, skb, NULL);
+}
+
+/* ndo_get_stats64: sum per-queue tx/rx counters under u64_stats seqcount
+ * retry loops; tolerates queue arrays vanishing mid-walk (NULL check per
+ * iteration during reconfiguration).
+ */
+static void hinic_get_stats64(struct net_device *netdev,
+			      struct rtnl_link_stats64 *stats)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct hinic_txq_stats *txq_stats;
+	struct hinic_rxq_stats *rxq_stats;
+	struct hinic_txq *txq;
+	struct hinic_rxq *rxq;
+	u64 bytes, packets, dropped, errors;
+	unsigned int start;
+	int i;
-	rx_mode_work->rx_mode = rx_mode;
+	bytes = 0;
+	packets = 0;
+	dropped = 0;
+	for (i = 0; i < nic_dev->max_qps; i++) {
+		if (!nic_dev->txqs)
+			break;
+
+		txq = &nic_dev->txqs[i];
+		txq_stats = &txq->txq_stats;
+		do {
+			start = u64_stats_fetch_begin(&txq_stats->syncp);
+			bytes += txq_stats->bytes;
+			packets += txq_stats->packets;
+			dropped += txq_stats->dropped;
+		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+	}
+	stats->tx_packets = packets;
+	stats->tx_bytes = bytes;
+	stats->tx_dropped = dropped;
+
+	bytes = 0;
+	packets = 0;
+	errors = 0;
+	dropped = 0;
+	for (i = 0; i < nic_dev->max_qps; i++) {
+		if (!nic_dev->rxqs)
+			break;
-	queue_work(nic_dev->workq, &rx_mode_work->work);
+		rxq = &nic_dev->rxqs[i];
+		rxq_stats = &rxq->rxq_stats;
+		do {
+			start = u64_stats_fetch_begin(&rxq_stats->syncp);
+			bytes += rxq_stats->bytes;
+			packets += rxq_stats->packets;
+			/* rx_errors is csum errors plus all other hw errors */
+			errors += rxq_stats->csum_errors +
+				rxq_stats->other_errors;
+			dropped += rxq_stats->dropped;
+		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+	}
+	stats->rx_packets = packets;
+	stats->rx_bytes = bytes;
+	stats->rx_errors = errors;
+	stats->rx_dropped = dropped;
 }
static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u16 sw_pi, hw_ci, sw_ci;
- struct hinic_sq *sq;
- u16 num_sqs, q_id;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 msix_idx;
+ u8 q_id;
+
+ HINIC_NIC_STATS_INC(nic_dev, netdev_tx_timeout);
+ nicif_err(nic_dev, drv, netdev, "Tx timeout\n");
+
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
+ continue;
+
+ msix_idx = nic_dev->irq_cfg[q_id].msix_entry_idx;
+ nicif_info(nic_dev, drv, netdev,
+ "txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx, msix mask: %d, intr_flag: 0x%lx\n",
+ q_id, hinic_dbg_get_sq_pi(nic_dev->hwdev, q_id),
+ hinic_get_sq_hw_ci(nic_dev->hwdev, q_id),
+ hinic_get_sq_local_ci(nic_dev->hwdev, q_id),
+ nic_dev->irq_cfg[q_id].napi.state,
+ hinic_get_msix_state(nic_dev->hwdev, msix_idx),
+ nic_dev->irq_cfg[q_id].intr_flag);
+ }
+}
+
+/* ndo_change_mtu: program the new MTU into hw, then mirror it into
+ * netdev->mtu on success.  Range is enforced by min_mtu/max_mtu.
+ */
+static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	u32 mtu = (u32)new_mtu;
+	int err = 0;
+
+	err = hinic_set_port_mtu(nic_dev->hwdev, mtu);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to change port mtu to %d\n",
+			  new_mtu);
+	} else {
+		/* netdev->mtu is unsigned int on 6.6: use %u, not %d */
+		nicif_info(nic_dev, drv, nic_dev->netdev, "Change mtu from %u to %d\n",
+			   netdev->mtu, new_mtu);
+		netdev->mtu = mtu;
+	}
+
+	return err;
+}
+
+/* ndo_set_mac_address: validate and program a new unicast MAC into hw.
+ * Returns -EPERM when the PF has already fixed this VF's MAC.
+ */
+static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	struct sockaddr *saddr = addr;
+	u16 func_id;
+	int err;
+
+	if (!FUNC_SUPPORT_CHANGE_MAC(nic_dev->hwdev)) {
+		nicif_warn(nic_dev, drv, netdev,
+			   "Current function don't support to set mac\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) {
+		nicif_info(nic_dev, drv, netdev,
+			   "Already using mac address %pM\n",
+			   saddr->sa_data);
+		return 0;
+	}
+
+	err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
+	if (err)
+		return err;
+
+	err = hinic_update_mac(nic_dev->hwdev, netdev->dev_addr, saddr->sa_data,
+			       0, func_id);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to update mac, err: %d\n",
+			  err);
+		return err == HINIC_PF_SET_VF_ALREADY ? -EPERM : err;
+	}
+
+	/* netdev->dev_addr is const since v5.17; a direct memcpy() no
+	 * longer compiles on 6.6 - use the core accessor instead
+	 */
+	eth_hw_addr_set(netdev, saddr->sa_data);
+
+	nicif_info(nic_dev, drv, netdev, "Set new mac address %pM\n",
+		   saddr->sa_data);
+
+	return 0;
+}
+
+/* ndo_vlan_rx_add_vid: program a vlan filter into hw and record it in
+ * the local vlan bitmap (VID_LINE/VID_COL map vid -> word/bit).
+ */
+static int
+hinic_vlan_rx_add_vid(struct net_device *netdev,
+		      __always_unused __be16 proto,
+		      u16 vid)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+	u16 func_id;
+	u32 col, line;
+	int err;
+
+	col = VID_COL(nic_dev, vid);
+	line = VID_LINE(nic_dev, vid);
+
+	err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
+	if (err)
+		goto end;
+
+	err = hinic_add_vlan(nic_dev->hwdev, vid, func_id);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to add vlan%d\n", vid);
+		goto end;
+	}
+
+	/* bit is only set after hw accepted the vlan */
+	set_bit(col, &vlan_bitmap[line]);
+
+	nicif_info(nic_dev, drv, netdev, "Add vlan %d\n", vid);
+
+end:
+	return err;
+}
+
+/* ndo_vlan_rx_kill_vid: remove a vlan filter from hw and clear its bit
+ * in the local vlan bitmap.
+ */
+static int
+hinic_vlan_rx_kill_vid(struct net_device *netdev,
+		       __always_unused __be16 proto,
+		       u16 vid)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	unsigned long *vlan_bitmap = nic_dev->vlan_bitmap;
+	u16 func_id;
+	/* u32 col/line for consistency with hinic_vlan_rx_add_vid() */
+	u32 col, line;
+	int err;
+
+	col = VID_COL(nic_dev, vid);
+	line = VID_LINE(nic_dev, vid);
+
+	err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
+	if (err)
+		goto end;
+
+	err = hinic_del_vlan(nic_dev->hwdev, vid, func_id);
+	if (err) {
+		nicif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
+		goto end;
+	}
+
+	/* bit is only cleared once hw dropped the vlan */
+	clear_bit(col, &vlan_bitmap[line]);
+
+	nicif_info(nic_dev, drv, netdev, "Remove vlan %d\n", vid);
+
+end:
+	return err;
+}
+
+/* human-readable enable/disable prefix used by the feature log lines */
+#define FEATURES_OP_STR(op)		((op) ? "Enable" : "Disable")
+
+/* Toggle hw TSO to match wanted_features; on failure record NETIF_F_TSO
+ * in *failed_features so set_features() can back it out of netdev.
+ */
+static int set_feature_tso(struct hinic_nic_dev *nic_dev,
+			   netdev_features_t wanted_features,
+			   netdev_features_t features,
+			   netdev_features_t *failed_features)
+{
+	netdev_features_t changed = wanted_features ^ features;
+	bool en = !!(wanted_features & NETIF_F_TSO);
+	int err;
+
+	/* nothing to do unless the TSO bit actually flipped */
+	if (!(changed & NETIF_F_TSO))
+		return 0;
+
+	err = hinic_set_tx_tso(nic_dev->hwdev, en);
+	if (err) {
+		hinic_err(nic_dev, drv, "%s tso failed\n", FEATURES_OP_STR(en));
+		*failed_features |= NETIF_F_TSO;
+	} else {
+		hinic_info(nic_dev, drv, "%s tso success\n",
+			   FEATURES_OP_STR(en));
+	}
+
+	return err;
+}
+
+/* Toggle hw rx vlan stripping (CTAG_RX) to match wanted_features;
+ * failures are accumulated into *failed_features.
+ */
+static int set_feature_cvlan(struct hinic_nic_dev *nic_dev,
+			     netdev_features_t wanted_features,
+			     netdev_features_t features,
+			     netdev_features_t *failed_features)
+{
+	netdev_features_t changed = wanted_features ^ features;
+	netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX;
+	bool en = !!(wanted_features & vlan_feature);
+	int err;
+
+	if (!(changed & vlan_feature))
+		return 0;
+
+	err = hinic_set_rx_vlan_offload(nic_dev->hwdev, en);
+	if (err) {
+		hinic_err(nic_dev, drv, "%s rxvlan failed\n",
+			  FEATURES_OP_STR(en));
+		*failed_features |= vlan_feature;
+	} else {
+		hinic_info(nic_dev, drv, "%s rxvlan success\n",
+			   FEATURES_OP_STR(en));
+	}
+
+	return err;
+}
+
+/* Handle NETIF_F_RXCSUM flips.  The hw csum engine is deliberately kept
+ * enabled regardless of the requested direction (see comment below);
+ * only the sw feature bit tracks the user's choice.
+ */
+static int set_feature_rxcsum(struct hinic_nic_dev *nic_dev,
+			      netdev_features_t wanted_features,
+			      netdev_features_t features,
+			      netdev_features_t *failed_features)
+{
+	netdev_features_t changed = wanted_features ^ features;
+	bool en = !!(wanted_features & NETIF_F_RXCSUM);
+	int err;
+
+	if (!(changed & NETIF_F_RXCSUM))
+		return 0;
+
+	/* hw should always enable rx csum */
+	err = hinic_set_rx_csum_offload(nic_dev->hwdev,
+					HINIC_RX_CSUM_OFFLOAD_EN);
+	if (err) {
+		hinic_err(nic_dev, drv, "%s rx csum failed\n",
+			  FEATURES_OP_STR(en));
+		*failed_features |= NETIF_F_RXCSUM;
+	} else {
+		hinic_info(nic_dev, drv, "%s rx csum success\n",
+			   FEATURES_OP_STR(en));
+	}
+
+	return err;
+}
+
+/* Toggle hw LRO to match wanted_features, using the adaptive timer and
+ * buffer-size settings.  Failures accumulate into *failed_features.
+ */
+static int set_feature_lro(struct hinic_nic_dev *nic_dev,
+			   netdev_features_t wanted_features,
+			   netdev_features_t features,
+			   netdev_features_t *failed_features)
+{
+	netdev_features_t changed = wanted_features ^ features;
+	bool en = !!(wanted_features & NETIF_F_LRO);
+	u32 lro_timer, lro_buf_size;
+	int err;
+
+	if (!(changed & NETIF_F_LRO))
+		return 0;
+
+	lro_timer = nic_dev->adaptive_cfg.lro.timer;
+	lro_buf_size = nic_dev->adaptive_cfg.lro.buffer_size;
+	/* hw takes the LRO size in units of rx buffers
+	 * (assumes rx_buff_len != 0 once the device is configured)
+	 */
+	err = hinic_set_rx_lro_state(nic_dev->hwdev, en, lro_timer,
+				     lro_buf_size / nic_dev->rx_buff_len);
+	if (err) {
+		hinic_err(nic_dev, drv, "%s lro failed\n", FEATURES_OP_STR(en));
+		*failed_features |= NETIF_F_LRO;
+	} else {
+		hinic_info(nic_dev, drv, "%s lro success\n",
+			   FEATURES_OP_STR(en));
+	}
+
+	return err;
+}
+
+/* Apply every per-feature setter, collecting the bits that failed so
+ * netdev->features can be rolled back to reflect what hw accepted.
+ * Returns 0 on full success, -EIO if any setter failed.
+ */
+static int set_features(struct hinic_nic_dev *nic_dev,
+			netdev_features_t pre_features,
+			netdev_features_t features)
+{
+	netdev_features_t failed_features = 0;
+	u32 err;
+
+	/* OR the (int) results together: any non-zero bit means a failure */
+	err = (u32)set_feature_tso(nic_dev, features, pre_features,
+				   &failed_features);
+	err |= (u32)set_feature_cvlan(nic_dev, features, pre_features,
+				      &failed_features);
+	err |= (u32)set_feature_rxcsum(nic_dev, features, pre_features,
+				       &failed_features);
+	err |= (u32)set_feature_lro(nic_dev, features, pre_features,
+				    &failed_features);
+	if (err) {
+		/* strip the failed bits so the stack sees hw reality */
+		nic_dev->netdev->features = features ^ failed_features;
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* ndo_set_features: thin wrapper delegating to set_features() with the
+ * current feature set as the baseline.
+ */
+static int hinic_set_features(struct net_device *netdev,
+			      netdev_features_t features)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	return set_features(nic_dev, nic_dev->netdev->features,
+			    features);
+}
+
+/* ndo_fix_features: enforce feature dependencies before they are applied */
+static netdev_features_t hinic_fix_features(struct net_device *netdev,
+					    netdev_features_t features)
+{
+	/* If Rx checksum is disabled, then LRO should also be disabled */
+	if (!(features & NETIF_F_RXCSUM))
+		features &= ~NETIF_F_LRO;
+
+	return features;
+}
+
+/* Bring hw to the driver's default configuration at probe time:
+ * PF-only resets (DCB, port link cfg, anti-attack, link-follow policy),
+ * then force-sync every netdev feature bit into hw.
+ */
+static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev)
+{
+	int err;
+
+	if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+		if (FUNC_SUPPORT_DCB(nic_dev->hwdev)) {
+			err = hinic_dcb_reset_hw_config(nic_dev);
+			if (err) {
+				nic_err(&nic_dev->pdev->dev, "Failed to reset hw dcb configuration\n");
+				return -EFAULT;
+			}
+		}
+
+		if (FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+			err = hinic_reset_port_link_cfg(nic_dev->hwdev);
+			if (err)
+				return -EFAULT;
+		}
+
+		hinic_set_anti_attack(nic_dev->hwdev, true);
+
+		/* set_link_status_follow is a module parameter */
+		if (set_link_status_follow < HINIC_LINK_FOLLOW_STATUS_MAX &&
+		    FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) {
+			err = hinic_set_link_status_follow(nic_dev->hwdev,
+							   set_link_status_follow);
+			if (err == HINIC_MGMT_CMD_UNSUPPORTED)
+				nic_warn(&nic_dev->pdev->dev,
+					 "Current version of firmware don't support to set link status follow port status\n");
+		}
+	}
+
+	/* enable all hw features in netdev->features */
+	return set_features(nic_dev, ~nic_dev->netdev->features,
+			    nic_dev->netdev->features);
+}
+
+/* mqprio offload entry: force full-TC hw offload mode, then apply */
+static int hinic_setup_tc_mqprio(struct net_device *dev,
+				 struct tc_mqprio_qopt *mqprio)
+{
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+	return hinic_setup_tc(dev, mqprio->num_tc);
+}
+
+/* ndo_setup_tc dispatcher: only mqprio qdisc offload is supported */
+static int __hinic_setup_tc(struct net_device *dev, enum tc_setup_type type,
+			    void *type_data)
+{
+	switch (type) {
+	case TC_SETUP_QDISC_MQPRIO:
+		return hinic_setup_tc_mqprio(dev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* ndo_poll_controller: kick napi on every active qp for netconsole etc. */
+static void hinic_netpoll(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	u16 i;
+
+	for (i = 0; i < nic_dev->num_qps; i++)
+		napi_schedule(&nic_dev->irq_cfg[i].napi);
+}
+#endif	/* CONFIG_NET_POLL_CONTROLLER */
+
+/* Program one MAC address into hw for this function (vlan id 0) */
+static int hinic_uc_sync(struct net_device *netdev, u8 *addr)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	u16 func_id;
+	int err;
+
+	err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
+	if (err)
+		return err;
+
+	err = hinic_set_mac(nic_dev->hwdev, addr, 0, func_id);
+	return err;
+}
+
+/* Remove one MAC address from hw, but never the netdev's own address */
+static int hinic_uc_unsync(struct net_device *netdev, u8 *addr)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	u16 func_id;
+	int err;
+
+	/* The addr is in use */
+	if (ether_addr_equal(addr, netdev->dev_addr))
+		return 0;
+
+	err = hinic_global_func_id_get(nic_dev->hwdev, &func_id);
+	if (err)
+		return err;
+
+	err = hinic_del_mac(nic_dev->hwdev, addr, 0, func_id);
+	return err;
+}
+
+/* Drop every entry from both sw MAC filter lists, unsyncing from hw any
+ * entry that had reached HW_SYNCED state.
+ * NOTE(review): hinic_uc_unsync() is also used for the mc list here -
+ * looks intentional (same hw MAC table), but worth confirming.
+ */
+static void hinic_clean_mac_list_filter(struct hinic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	struct hinic_mac_filter *f, *ftmp;
+
+	list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) {
+		if (f->state == HINIC_MAC_HW_SYNCED)
+			hinic_uc_unsync(netdev, f->addr);
+		list_del(&f->list);
+		kfree(f);
+	}
+
+	list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) {
+		if (f->state == HINIC_MAC_HW_SYNCED)
+			hinic_uc_unsync(netdev, f->addr);
+		list_del(&f->list);
+		kfree(f);
+	}
+}
+
+/* Linear lookup of a MAC in a filter list; NULL if not present */
+static struct hinic_mac_filter *hinic_find_mac(struct list_head *filter_list,
+					       u8 *addr)
+{
+	struct hinic_mac_filter *f;
+
+	list_for_each_entry(f, filter_list, list) {
+		if (ether_addr_equal(addr, f->addr))
+			return f;
+	}
+	return NULL;
+}
+
+/* Append a new WAIT_HW_SYNC filter entry and flag the list as dirty.
+ * GFP_ATOMIC because callers hold the netdev addr spinlock.
+ * Returns NULL on allocation failure.
+ */
+static struct hinic_mac_filter
+	*hinic_add_filter(struct hinic_nic_dev *nic_dev,
+			  struct list_head *mac_filter_list, u8 *addr)
+{
+	struct hinic_mac_filter *f;
+
+	f = kzalloc(sizeof(*f), GFP_ATOMIC);
+	if (!f)
+		goto out;
+
+	memcpy(f->addr, addr, ETH_ALEN);
+
+	INIT_LIST_HEAD(&f->list);
+	list_add_tail(&f->list, mac_filter_list);
+
+	f->state = HINIC_MAC_WAIT_HW_SYNC;
+	set_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags);
+
+out:
+	return f;
+}
+
+/* Mark a filter entry for removal.  Entries never pushed to hw are freed
+ * immediately; synced ones are flagged WAIT_HW_UNSYNC for the worker.
+ */
+static void hinic_del_filter(struct hinic_nic_dev *nic_dev,
+			     struct hinic_mac_filter *f)
+{
+	set_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags);
+
+	if (f->state == HINIC_MAC_WAIT_HW_SYNC) {
+		/* have not added to hw, delete it directly */
+		list_del(&f->list);
+		kfree(f);
+		return;
+	}
+
+	f->state = HINIC_MAC_WAIT_HW_UNSYNC;
+}
+
+/* Heap-copy a filter entry (unlinked), used to build temp sync lists;
+ * NULL on allocation failure.
+ */
+static struct hinic_mac_filter
+	*hinic_mac_filter_entry_clone(struct hinic_mac_filter *src)
+{
+	struct hinic_mac_filter *f;
+
+	f = kzalloc(sizeof(*f), GFP_ATOMIC);
+	if (!f)
+		return NULL;
+
+	*f = *src;
+	INIT_LIST_HEAD(&f->list);
+
+	return f;
+}
+
+/* Roll back a pending-delete list: move entries back into filter_list
+ * (skipping duplicates), re-marking synced ones for future unsync.
+ */
+static void hinic_undo_del_filter_entries(struct list_head *filter_list,
+					  struct list_head *from)
+{
+	struct hinic_mac_filter *f, *ftmp;
+
+	list_for_each_entry_safe(f, ftmp, from, list) {
+		if (hinic_find_mac(filter_list, f->addr))
+			continue;
+
+		if (f->state == HINIC_MAC_HW_SYNCED)
+			f->state = HINIC_MAC_WAIT_HW_UNSYNC;
+
+		list_move_tail(&f->list, filter_list);
+	}
+}
+
+/* Roll back a pending-add list: matching entries in filter_list that
+ * were marked HW_SYNCED are reverted to WAIT_HW_SYNC for a retry.
+ */
+static void hinic_undo_add_filter_entries(struct list_head *filter_list,
+					  struct list_head *from)
+{
+	struct hinic_mac_filter *f, *ftmp, *tmp;
+
+	list_for_each_entry_safe(f, ftmp, from, list) {
+		tmp = hinic_find_mac(filter_list, f->addr);
+		if (tmp && tmp->state == HINIC_MAC_HW_SYNCED)
+			tmp->state = HINIC_MAC_WAIT_HW_SYNC;
+	}
+}
+
+/* Free every entry of a temporary filter list */
+static void hinic_cleanup_filter_list(struct list_head *head)
+{
+	struct hinic_mac_filter *f, *ftmp;
+
+	list_for_each_entry_safe(f, ftmp, head, list) {
+		list_del(&f->list);
+		kfree(f);
+	}
+}
+
+/* Push pending deletes then pending adds to hw.  Delete failures are
+ * logged and ignored; an add failure aborts immediately with the error
+ * (caller cleans up whatever remains on add_list).  On success returns
+ * the number of addresses added.
+ */
+static int hinic_mac_filter_sync_hw(struct hinic_nic_dev *nic_dev,
+				    struct list_head *del_list,
+				    struct list_head *add_list)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	struct hinic_mac_filter *f, *ftmp;
+	int err = 0, add_count = 0;
+
+	if (!list_empty(del_list)) {
+		list_for_each_entry_safe(f, ftmp, del_list, list) {
+			err = hinic_uc_unsync(netdev, f->addr);
+			if (err) { /* ignore errors when delete mac */
+				nic_err(&nic_dev->pdev->dev, "Failed to delete mac\n");
+			}
+
+			list_del(&f->list);
+			kfree(f);
+		}
+	}
+
+	if (!list_empty(add_list)) {
+		list_for_each_entry_safe(f, ftmp, add_list, list) {
+			err = hinic_uc_sync(netdev, f->addr);
+			if (err) {
+				nic_err(&nic_dev->pdev->dev, "Failed to add mac\n");
+				return err;
+			}
+
+			add_count++;
+			list_del(&f->list);
+			kfree(f);
+		}
+	}
+
+	return add_count;
+}
+
+/* Reconcile one sw MAC filter list with hw.  Builds temporary del/add
+ * lists, pushes them via hinic_mac_filter_sync_hw(), and on failure
+ * rolls the list states back so the caller can fall back to
+ * promisc/allmulti.  Returns the add count, or negative on failure.
+ * @uc: true for the unicast list (VFs can't go promisc, so uc entries
+ *      already in hw are kept on add failure).
+ */
+static int hinic_mac_filter_sync(struct hinic_nic_dev *nic_dev,
+				 struct list_head *mac_filter_list, bool uc)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	struct list_head tmp_del_list, tmp_add_list;
+	struct hinic_mac_filter *f, *ftmp, *fclone;
+	int err = 0, add_count = 0;
+
+	INIT_LIST_HEAD(&tmp_del_list);
+	INIT_LIST_HEAD(&tmp_add_list);
+
+	/* stage deletions: move WAIT_HW_UNSYNC entries onto the del list */
+	list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+		if (f->state != HINIC_MAC_WAIT_HW_UNSYNC)
+			continue;
+
+		f->state = HINIC_MAC_HW_UNSYNCED;
+		list_move_tail(&f->list, &tmp_del_list);
+	}
+
+	/* stage additions: clone WAIT_HW_SYNC entries onto the add list */
+	list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+		if (f->state != HINIC_MAC_WAIT_HW_SYNC)
+			continue;
+
+		fclone = hinic_mac_filter_entry_clone(f);
+		if (!fclone) {
+			err = -ENOMEM;
+			break;
+		}
+
+		f->state = HINIC_MAC_HW_SYNCED;
+		list_add_tail(&fclone->list, &tmp_add_list);
+	}
+
+	if (err) {
+		hinic_undo_del_filter_entries(mac_filter_list, &tmp_del_list);
+		hinic_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
+		nicif_err(nic_dev, drv, netdev, "Failed to clone mac_filter_entry\n");
+	}
+
+	if (err) {
+		hinic_cleanup_filter_list(&tmp_del_list);
+		hinic_cleanup_filter_list(&tmp_add_list);
+		return -ENOMEM;
+	}
+
+	add_count =
+	hinic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list);
+	/* add_list drained means hw took everything - we are done */
+	if (list_empty(&tmp_add_list))
+		return add_count;
+
+	/* there are errors when add mac to hw, delete all mac in hw */
+	hinic_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
+	/* VF don't support to enter promisc mode,
+	 * so we can't delete any other uc mac
+	 */
+	if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) {
+		list_for_each_entry_safe(f, ftmp, mac_filter_list, list) {
+			if (f->state != HINIC_MAC_HW_SYNCED)
+				continue;
+
+			fclone = hinic_mac_filter_entry_clone(f);
+			if (!fclone)
+				break;
+
+			f->state = HINIC_MAC_WAIT_HW_SYNC;
+			list_add_tail(&fclone->list, &tmp_del_list);
+		}
+	}
+
+	hinic_cleanup_filter_list(&tmp_add_list);
+	hinic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list);
+
+	/* need to enter promisc/allmulti mode */
+	return -ENOMEM;
+}
+
+/* Sync both uc and mc filter lists when the CHANGED flag is set; if a
+ * sync overflows hw, force promisc (PF only) or allmulti as fallback.
+ */
+static void hinic_mac_filter_sync_all(struct hinic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	int add_count;
+
+	if (test_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags)) {
+		clear_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags);
+		add_count = hinic_mac_filter_sync(nic_dev,
+						  &nic_dev->uc_filter_list,
+						  true);
+		if (add_count < 0 && !HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+			set_bit(HINIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state);
+			nicif_info(nic_dev, drv, netdev, "Promisc mode forced on\n");
+		} else if (add_count) {
+			clear_bit(HINIC_PROMISC_FORCE_ON,
+				  &nic_dev->rx_mod_state);
+		}
+
+		add_count = hinic_mac_filter_sync(nic_dev,
+						  &nic_dev->mc_filter_list,
+						  false);
+		if (add_count < 0) {
+			set_bit(HINIC_ALLMULTI_FORCE_ON,
+				&nic_dev->rx_mod_state);
+			nicif_info(nic_dev, drv, netdev, "All multicast mode forced on\n");
+		} else if (add_count) {
+			clear_bit(HINIC_ALLMULTI_FORCE_ON,
+				  &nic_dev->rx_mod_state);
+		}
+	}
+}
+
+/* baseline rx mode: unicast + multicast + broadcast always enabled */
+#define HINIC_DEFAULT_RX_MODE	(HINIC_RX_MODE_UC | HINIC_RX_MODE_MC | \
+				HINIC_RX_MODE_BC)
+
+/* Diff the netdev's uc/mc address list against our sw filter list:
+ * stage adds for new addresses and deletes for vanished ones.  The
+ * netdev addr lock is taken only around list walks, not hw calls.
+ */
+static void hinic_update_mac_filter(struct hinic_nic_dev *nic_dev,
+				    struct netdev_hw_addr_list *src_list,
+				    struct list_head *filter_list)
+{
+	struct netdev_hw_addr *ha;
+	struct hinic_mac_filter *f, *ftmp, *filter;
+
+	/* add addr if not already in the filter list */
+	netif_addr_lock_bh(nic_dev->netdev);
+	netdev_hw_addr_list_for_each(ha, src_list) {
+		filter = hinic_find_mac(filter_list, ha->addr);
+		if (!filter)
+			hinic_add_filter(nic_dev, filter_list, ha->addr);
+		else if (filter->state == HINIC_MAC_WAIT_HW_UNSYNC)
+			filter->state = HINIC_MAC_HW_SYNCED;
+	}
+	netif_addr_unlock_bh(nic_dev->netdev);
+
+	/* delete addr if not in netdev list */
+	list_for_each_entry_safe(f, ftmp, filter_list, list) {
+		bool found = false;
+
+		netif_addr_lock_bh(nic_dev->netdev);
+		netdev_hw_addr_list_for_each(ha, src_list)
+			if (ether_addr_equal(ha->addr, f->addr)) {
+				found = true;
+				break;
+			}
+		netif_addr_unlock_bh(nic_dev->netdev);
+
+		if (found)
+			continue;
+
+		hinic_del_filter(nic_dev, f);
+	}
+}
+
+/* Refresh both sw filter lists from the netdev uc/mc lists, but only
+ * when hinic_nic_set_rx_mode() flagged a count change.
+ */
+static void __update_mac_filter(struct hinic_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+
+	if (test_and_clear_bit(HINIC_UPDATE_MAC_FILTER, &nic_dev->flags)) {
+		hinic_update_mac_filter(nic_dev, &netdev->uc,
+					&nic_dev->uc_filter_list);
+		hinic_update_mac_filter(nic_dev, &netdev->mc,
+					&nic_dev->mc_filter_list);
+	}
+}
+
+/* Deferred rx-mode worker: sync MAC filters, then compute the desired
+ * promisc/allmulti state (user flags OR forced-on fallback bits) and
+ * program hw only when it differs from the cached HW_* state bits.
+ */
+static void hinic_set_rx_mode_work(struct work_struct *work)
+{
+	struct hinic_nic_dev *nic_dev =
+			container_of(work, struct hinic_nic_dev, rx_mode_work);
+	struct net_device *netdev = nic_dev->netdev;
+	int promisc_en = 0, allmulti_en = 0;
+	int err = 0;
+
+	__update_mac_filter(nic_dev);
+
+	hinic_mac_filter_sync_all(nic_dev);
+
+	/* VF don't support to enter promisc mode */
+	if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+		promisc_en = !!(netdev->flags & IFF_PROMISC) ||
+			test_bit(HINIC_PROMISC_FORCE_ON,
+				 &nic_dev->rx_mod_state);
+	}
+
+	allmulti_en = !!(netdev->flags & IFF_ALLMULTI) ||
+		test_bit(HINIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state);
+
+	if (promisc_en !=
+	    test_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) ||
+	    allmulti_en !=
+	    test_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) {
+		enum hinic_rx_mod rx_mod = HINIC_DEFAULT_RX_MODE;
+
+		rx_mod |= (promisc_en ? HINIC_RX_MODE_PROMISC : 0);
+		rx_mod |= (allmulti_en ? HINIC_RX_MODE_MC_ALL : 0);
+
+		/* FOR DEBUG */
+		if (promisc_en !=
+		    test_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state))
+			nicif_info(nic_dev, drv, netdev,
+				   "%s promisc mode\n",
+				   promisc_en ? "Enter" : "Left");
+		if (allmulti_en !=
+		    test_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state))
+			nicif_info(nic_dev, drv, netdev,
+				   "%s all_multi mode\n",
+				   allmulti_en ? "Enter" : "Left");
+
+		err = hinic_set_rx_mode(nic_dev->hwdev, rx_mod);
+		/* cache the new hw state only if programming succeeded */
+		if (!err) {
+			promisc_en ?
+			set_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) :
+			clear_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state);
+
+			allmulti_en ?
+			set_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) :
+			clear_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state);
+		} else {
+			nicif_err(nic_dev, drv, netdev, "Failed to set rx_mode\n");
+		}
+	}
+}
+
+/* ndo_set_rx_mode (atomic context): note whether the uc/mc address
+ * counts changed, then defer the real work to rx_mode_work.
+ */
+static void hinic_nic_set_rx_mode(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt ||
+	    netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) {
+		set_bit(HINIC_UPDATE_MAC_FILTER, &nic_dev->flags);
+		nic_dev->netdev_uc_cnt = netdev_uc_count(netdev);
+		nic_dev->netdev_mc_cnt = netdev_mc_count(netdev);
+	}
+
+	if (FUNC_SUPPORT_RX_MODE(nic_dev->hwdev))
+		queue_work(nic_dev->workq, &nic_dev->rx_mode_work);
+}
+
+/* PF netdev ops: full set including SR-IOV ndo_set_vf_* and tc offload */
+static const struct net_device_ops hinic_netdev_ops = {
+	.ndo_open = hinic_open,
+	.ndo_stop = hinic_close,
+	.ndo_start_xmit = hinic_xmit_frame,
+	.ndo_get_stats64 =  hinic_get_stats64,
+	.ndo_tx_timeout = hinic_tx_timeout,
+	.ndo_select_queue = hinic_select_queue,
+	.ndo_change_mtu = hinic_change_mtu,
+	.ndo_set_mac_address = hinic_set_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
+
+	.ndo_set_vf_mac	= hinic_ndo_set_vf_mac,
+	.ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
+	.ndo_set_vf_rate = hinic_ndo_set_vf_bw,
+	.ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
+	.ndo_set_vf_trust = hinic_ndo_set_vf_trust,
+	.ndo_get_vf_config = hinic_ndo_get_vf_config,
+
+	.ndo_setup_tc = __hinic_setup_tc,
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = hinic_netpoll,
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+	.ndo_set_rx_mode = hinic_nic_set_rx_mode,
+	.ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
+	.ndo_fix_features = hinic_fix_features,
+	.ndo_set_features = hinic_set_features,
+};
+
+/* VF netdev ops: same data path as the PF set, minus SR-IOV admin and
+ * tc offload hooks.
+ */
+static const struct net_device_ops hinicvf_netdev_ops = {
+	.ndo_open = hinic_open,
+	.ndo_stop = hinic_close,
+	.ndo_start_xmit = hinic_xmit_frame,
+	.ndo_get_stats64 =  hinic_get_stats64,
+	.ndo_tx_timeout = hinic_tx_timeout,
+	.ndo_select_queue = hinic_select_queue,
+	.ndo_change_mtu = hinic_change_mtu,
+	.ndo_set_mac_address = hinic_set_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = hinic_netpoll,
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+	.ndo_set_rx_mode = hinic_nic_set_rx_mode,
+
+	.ndo_fix_features = hinic_fix_features,
+	.ndo_set_features = hinic_set_features,
+};
+
+/* Populate netdev feature sets (features, hw_features, vlan_features,
+ * hw_enc_features) according to what this function's hw supports.
+ */
+static void netdev_feature_init(struct net_device *netdev)
+{
+	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+	netdev_features_t hw_features;
+
+	netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
+			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			NETIF_F_TSO |
+			NETIF_F_TSO6 | NETIF_F_RXCSUM;
+
+	if (FUNC_SUPPORT_SCTP_CRC(nic_dev->hwdev))
+		netdev->features |= NETIF_F_SCTP_CRC;
+
+	netdev->vlan_features = netdev->features;
+
+	if (FUNC_SUPPORT_ENCAP_TSO_CSUM(nic_dev->hwdev))
+		netdev->features |= NETIF_F_GSO_UDP_TUNNEL |
+				NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+	if (FUNC_SUPPORT_HW_VLAN(nic_dev->hwdev)) {
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+	}
+
+	/* copy netdev features into list of user selectable features */
+	hw_features = netdev->hw_features;
+	hw_features |= netdev->features;
+
+	if (FUNC_SUPPORT_LRO(nic_dev->hwdev)) {
+		/* LRO is disable in default, only set hw features */
+		hw_features |= NETIF_F_LRO;
+
+		/* Enable LRO */
+		if (nic_dev->adaptive_cfg.lro.enable &&
+		    !HINIC_FUNC_IS_VF(nic_dev->hwdev))
+			netdev->features |= NETIF_F_LRO;
+	}
+
+	netdev->hw_features = hw_features;
+
+	/* Set after hw_features because this could not be part of
+	 * hw_features
+	 */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
+	if (FUNC_SUPPORT_ENCAP_TSO_CSUM(nic_dev->hwdev)) {
+		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+					| NETIF_F_SCTP_CRC | NETIF_F_SG;
+		netdev->hw_enc_features |= NETIF_F_TSO | NETIF_F_TSO6
+					| NETIF_F_TSO_ECN
+					| NETIF_F_GSO_UDP_TUNNEL_CSUM
+					| NETIF_F_GSO_UDP_TUNNEL;
+	}
+}
+
+#define MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, out_qps) { \
+ if ((num_qps) > (nic_dev)->max_qps) \
+ nic_warn(&(nic_dev)->pdev->dev, \
+ "Module Parameter %s value %d is out of range, "\
+ "Maximum value for the device: %d, using %d\n",\
+ #num_qps, num_qps, (nic_dev)->max_qps, \
+ (nic_dev)->max_qps); \
+ if (!(num_qps) || (num_qps) > (nic_dev)->max_qps) \
+ out_qps = (nic_dev)->max_qps; \
+ else \
+ out_qps = num_qps; \
+}
+
+/* Decide RSS enablement and the number of queue pairs: allocate an RSS
+ * template, clamp num_qps by module params and local-NUMA cpu count,
+ * then program default RSS parameters.  Falls back to a single queue
+ * (RSS off) when the function has <= 1 qp or no template is available.
+ */
+static void hinic_try_to_enable_rss(struct hinic_nic_dev *nic_dev)
+{
+	u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
+	int i, node, err = 0;
+	u16 num_cpus = 0;
+	enum hinic_service_mode service_mode =
+					hinic_get_service_mode(nic_dev->hwdev);
+
+	nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev);
+	if (nic_dev->max_qps <= 1) {
+		clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
+		nic_dev->rss_limit = nic_dev->max_qps;
+		nic_dev->num_qps = nic_dev->max_qps;
+		nic_dev->num_rss = nic_dev->max_qps;
+
+		return;
+	}
+
+	err = hinic_rss_template_alloc(nic_dev->hwdev, &nic_dev->rss_tmpl_idx);
+	if (err) {
+		if (err == -ENOSPC)
+			nic_warn(&nic_dev->pdev->dev,
+				 "Failed to alloc tmpl_idx for rss, table is full\n");
+		else
+			nic_err(&nic_dev->pdev->dev,
+				"Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
+		/* no template -> single queue, RSS disabled */
+		clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
+		nic_dev->max_qps = 1;
+		nic_dev->rss_limit = nic_dev->max_qps;
+		nic_dev->num_qps = nic_dev->max_qps;
+		nic_dev->num_rss = nic_dev->max_qps;
+
+		return;
+	}
+
+	set_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
+
+	nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev);
+
+	/* num_qps here is the module parameter of the same name */
+	MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, nic_dev->num_qps);
+
+	/* To reduce memory footprint in ovs mode.
+	 * VF can't get board info correctly with early pf driver.
+	 */
+	if ((hinic_get_func_mode(nic_dev->hwdev) == FUNC_MOD_NORMAL_HOST) &&
+	    service_mode == HINIC_WORK_MODE_OVS &&
+	    hinic_func_type(nic_dev->hwdev) != TYPE_VF)
+		MOD_PARA_VALIDATE_NUM_QPS(nic_dev, ovs_num_qps,
+					  nic_dev->num_qps);
+
+	/* count cpus on the device's NUMA node */
+	for (i = 0; i < (int)num_online_cpus(); i++) {
+		node = (int)cpu_to_node(i);
+		if (node == dev_to_node(&nic_dev->pdev->dev))
+			num_cpus++;
+	}
+
+	if (!num_cpus)
+		num_cpus = (u16)num_online_cpus();
+
+	nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
+
+	nic_dev->rss_limit = nic_dev->num_qps;
+	nic_dev->num_rss = nic_dev->num_qps;
+
+	hinic_init_rss_parameters(nic_dev->netdev);
+	hinic_set_hw_rss_parameters(nic_dev->netdev, 0, 0, prio_tc);
+}
+
+static int hinic_sw_init(struct hinic_nic_dev *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u16 func_id;
+ int err = 0;
+
+ sema_init(&adapter->port_state_sem, 1);
+
+ err = hinic_dcb_init(adapter);
+ if (err) {
+ nic_err(&adapter->pdev->dev, "Failed to init dcb\n");
+ return -EFAULT;
+ }
+
+ if (HINIC_FUNC_IS_VF(adapter->hwdev)) {
+ err = hinic_sq_cos_mapping(netdev);
+ if (err) {
+ nic_err(&adapter->pdev->dev, "Failed to set sq_cos_mapping\n");
+ return -EFAULT;
+ }
+ }
+
+ adapter->sq_depth = HINIC_SQ_DEPTH;
+ adapter->rq_depth = HINIC_RQ_DEPTH;
+
+ hinic_try_to_enable_rss(adapter);
+
+ err = hinic_get_default_mac(adapter->hwdev, netdev);
+ if (err) {
+ nic_err(&adapter->pdev->dev, "Failed to get MAC address\n");
+ goto get_mac_err;
+ }
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ if (!HINIC_FUNC_IS_VF(adapter->hwdev)) {
+ nic_err(&adapter->pdev->dev, "Invalid MAC address\n");
+ err = -EIO;
+ goto err_mac;
+ }
+
+ nic_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random\n",
+ netdev->dev_addr);
+ eth_hw_addr_random(netdev);
+ }
+
+ err = hinic_global_func_id_get(adapter->hwdev, &func_id);
+ if (err)
+ goto func_id_err;
+
+ err = hinic_set_mac(adapter->hwdev, netdev->dev_addr, 0, func_id);
+	/* When this is the VF driver, the PF may have already set the VF
+	 * MAC address, so this condition must not be treated as an error
+	 * during the driver probe procedure.
+	 */
+ if (err && err != HINIC_PF_SET_VF_ALREADY) {
+ nic_err(&adapter->pdev->dev, "Failed to set default MAC\n");
+ goto set_mac_err;
+ }
- num_sqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ /* MTU range: 256 - 9600 */
+ netdev->min_mtu = HINIC_MIN_MTU_SIZE;
+ netdev->max_mtu = HINIC_MAX_JUMBO_FRAME_SIZE;
- netif_err(nic_dev, drv, netdev, "Tx timeout\n");
+ return 0;
- for (q_id = 0; q_id < num_sqs; q_id++) {
- if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
- continue;
+set_mac_err:
+func_id_err:
+err_mac:
+get_mac_err:
+ if (test_bit(HINIC_RSS_ENABLE, &adapter->flags))
+ hinic_rss_template_free(adapter->hwdev, adapter->rss_tmpl_idx);
- sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);
- sw_pi = atomic_read(&sq->wq->prod_idx) & sq->wq->mask;
- hw_ci = be16_to_cpu(*(u16 *)(sq->hw_ci_addr)) & sq->wq->mask;
- sw_ci = atomic_read(&sq->wq->cons_idx) & sq->wq->mask;
- netif_err(nic_dev, drv, netdev, "Txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n",
- q_id, sw_pi, hw_ci, sw_ci,
- nic_dev->txqs[q_id].napi.state);
- }
+ return err;
}
-static void hinic_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void hinic_assign_netdev_ops(struct hinic_nic_dev *adapter)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rxq_stats nic_rx_stats = {};
- struct hinic_txq_stats nic_tx_stats = {};
-
- if (nic_dev->flags & HINIC_INTF_UP)
- gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats);
-
- stats->rx_bytes = nic_rx_stats.bytes;
- stats->rx_packets = nic_rx_stats.pkts;
- stats->rx_errors = nic_rx_stats.errors;
-
- stats->tx_bytes = nic_tx_stats.bytes;
- stats->tx_packets = nic_tx_stats.pkts;
- stats->tx_errors = nic_tx_stats.tx_dropped;
+ if (!HINIC_FUNC_IS_VF(adapter->hwdev)) {
+ adapter->netdev->netdev_ops = &hinic_netdev_ops;
+ if (FUNC_SUPPORT_DCB(adapter->hwdev))
+ adapter->netdev->dcbnl_ops = &hinic_dcbnl_ops;
+ hinic_set_ethtool_ops(adapter->netdev);
+ } else {
+ adapter->netdev->netdev_ops = &hinicvf_netdev_ops;
+ hinicvf_set_ethtool_ops(adapter->netdev);
+ }
+ adapter->netdev->watchdog_timeo = 5 * HZ;
}
-static int hinic_set_features(struct net_device *netdev,
- netdev_features_t features)
+#define HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT 1
+#define HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER 1
+#define HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER 2
+#define HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER 3
+#define HINIC_DFT_PG_100GE_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC_DFT_PG_100GE_TXRX_MSIX_COALESC_TIMER 2
+#define HINIC_DFT_PG_ARM_100GE_TXRX_MSIX_COALESC_TIMER 3
+
+static void update_queue_coal_param(struct hinic_nic_dev *nic_dev,
+ struct pci_device_id *id, u16 qid)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_intr_coal_info *info = NULL;
+
+ info = &nic_dev->intr_coalesce[qid];
+ if (!nic_dev->intr_coal_set_flag) {
+ switch (id->driver_data) {
+ case HINIC_BOARD_PG_TP_10GE:
+ info->pending_limt =
+ HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT;
+ info->coalesce_timer_cfg =
+ HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER;
+ break;
+ case HINIC_BOARD_PG_SM_25GE:
+ info->pending_limt =
+ HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT;
+ info->coalesce_timer_cfg =
+ HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER;
+ break;
+ case HINIC_BOARD_PG_100GE:
+ info->pending_limt =
+ HINIC_DFT_PG_100GE_TXRX_MSIX_PENDING_LIMIT;
+ info->coalesce_timer_cfg =
+ HINIC_DFT_PG_ARM_100GE_TXRX_MSIX_COALESC_TIMER;
+ break;
+ default:
+ info->pending_limt = qp_pending_limit;
+ info->coalesce_timer_cfg = qp_coalesc_timer_cfg;
+ break;
+ }
+ }
- return set_features(nic_dev, nic_dev->netdev->features,
- features, false);
+ info->resend_timer_cfg = HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ info->pkt_rate_high = HINIC_RX_RATE_HIGH;
+ info->rx_usecs_high = qp_coalesc_timer_high;
+ info->rx_pending_limt_high = qp_pending_limit_high;
+ info->pkt_rate_low = HINIC_RX_RATE_LOW;
+ info->rx_usecs_low = qp_coalesc_timer_low;
+ info->rx_pending_limt_low = qp_pending_limit_low;
+
+ if (nic_dev->in_vm) {
+ if (qp_pending_limit_high == HINIC_RX_PENDING_LIMIT_HIGH)
+ qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH_VM;
+ info->pkt_rate_low = HINIC_RX_RATE_LOW_VM;
+ info->rx_pending_limt_high = qp_pending_limit_high;
+ }
+
+	/* Applies to SDI 3.0 VM-mode VF driver, or BM-mode PF/VF driver */
+ if ((nic_dev->is_vm_slave && nic_dev->in_vm) ||
+ nic_dev->is_bm_slave) {
+ info->pkt_rate_high = SDI_VM_RX_PKT_RATE_HIGH;
+ info->pkt_rate_low = SDI_VM_RX_PKT_RATE_LOW;
+
+ if (qp_coalesc_use_drv_params_switch == 0) {
+			/* On ARM servers this value may need
+			 * further tuning.
+			 */
+ info->pending_limt = SDI_VM_PENDING_LIMT;
+ info->coalesce_timer_cfg = SDI_VM_COALESCE_TIMER_CFG;
+ info->rx_usecs_high = SDI_VM_RX_USECS_HIGH;
+ info->rx_pending_limt_high =
+ SDI_VM_RX_PENDING_LIMT_HIGH;
+ info->rx_usecs_low = SDI_VM_RX_USECS_LOW;
+ info->rx_pending_limt_low = SDI_VM_RX_PENDING_LIMT_LOW;
+ } else {
+ info->rx_usecs_high = qp_coalesc_timer_high;
+ info->rx_pending_limt_high = qp_pending_limit_high;
+ info->rx_usecs_low = qp_coalesc_timer_low;
+ info->rx_pending_limt_low = qp_pending_limit_low;
+ }
+ }
}
-static netdev_features_t hinic_fix_features(struct net_device *netdev,
- netdev_features_t features)
+static void init_intr_coal_param(struct hinic_nic_dev *nic_dev)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_device_id *id;
+ u16 i;
- /* If Rx checksum is disabled, then LRO should also be disabled */
- if (!(features & NETIF_F_RXCSUM)) {
- netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n");
- features &= ~NETIF_F_LRO;
+ id = hinic_get_pci_device_id(nic_dev->pdev);
+ switch (id->driver_data) {
+ case HINIC_BOARD_10GE:
+ case HINIC_BOARD_PG_TP_10GE:
+ nic_dev->his_link_speed = SPEED_10000;
+ break;
+ case HINIC_BOARD_25GE:
+ case HINIC_BOARD_PG_SM_25GE:
+ nic_dev->his_link_speed = SPEED_25000;
+ break;
+ case HINIC_BOARD_40GE:
+ nic_dev->his_link_speed = SPEED_40000;
+ break;
+ case HINIC_BOARD_100GE:
+ case HINIC_BOARD_PG_100GE:
+ nic_dev->his_link_speed = SPEED_100000;
+ break;
+ default:
+ break;
}
- return features;
+ for (i = 0; i < nic_dev->max_qps; i++)
+ update_queue_coal_param(nic_dev, id, i);
}
-static const struct net_device_ops hinic_netdev_ops = {
- .ndo_open = hinic_open,
- .ndo_stop = hinic_close,
- .ndo_change_mtu = hinic_change_mtu,
- .ndo_set_mac_address = hinic_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
- .ndo_set_rx_mode = hinic_set_rx_mode,
- .ndo_start_xmit = hinic_xmit_frame,
- .ndo_tx_timeout = hinic_tx_timeout,
- .ndo_get_stats64 = hinic_get_stats64,
- .ndo_fix_features = hinic_fix_features,
- .ndo_set_features = hinic_set_features,
- .ndo_set_vf_mac = hinic_ndo_set_vf_mac,
- .ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
- .ndo_get_vf_config = hinic_ndo_get_vf_config,
- .ndo_set_vf_trust = hinic_ndo_set_vf_trust,
- .ndo_set_vf_rate = hinic_ndo_set_vf_bw,
- .ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
- .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
-};
+static int hinic_init_intr_coalesce(struct hinic_nic_dev *nic_dev)
+{
+ u64 size;
-static const struct net_device_ops hinicvf_netdev_ops = {
- .ndo_open = hinic_open,
- .ndo_stop = hinic_close,
- .ndo_change_mtu = hinic_change_mtu,
- .ndo_set_mac_address = hinic_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
- .ndo_set_rx_mode = hinic_set_rx_mode,
- .ndo_start_xmit = hinic_xmit_frame,
- .ndo_tx_timeout = hinic_tx_timeout,
- .ndo_get_stats64 = hinic_get_stats64,
- .ndo_fix_features = hinic_fix_features,
- .ndo_set_features = hinic_set_features,
-};
+ if (qp_pending_limit != HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT ||
+ qp_coalesc_timer_cfg != HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG)
+ nic_dev->intr_coal_set_flag = 1;
+ else
+ nic_dev->intr_coal_set_flag = 0;
-static void netdev_features_init(struct net_device *netdev)
-{
- netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_RXCSUM | NETIF_F_LRO |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ size = sizeof(*nic_dev->intr_coalesce) * nic_dev->max_qps;
+ if (!size) {
+ nic_err(&nic_dev->pdev->dev, "Cannot allocate zero size intr coalesce\n");
+ return -EINVAL;
+ }
+ nic_dev->intr_coalesce = kzalloc(size, GFP_KERNEL);
+ if (!nic_dev->intr_coalesce)
+ return -ENOMEM;
- netdev->vlan_features = netdev->hw_features;
+ init_intr_coal_param(nic_dev);
- netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (test_bit(HINIC_INTR_ADAPT, &nic_dev->flags))
+ nic_dev->adaptive_rx_coal = 1;
+ else
+ nic_dev->adaptive_rx_coal = 0;
- netdev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SCTP_CRC |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_UDP_TUNNEL;
+ return 0;
}
-static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
+static void hinic_free_intr_coalesce(struct hinic_nic_dev *nic_dev)
{
- struct hinic_nic_cfg *nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
- struct hinic_pause_config pause_info = {0};
- struct hinic_port_cap port_cap = {0};
-
- if (hinic_port_get_cap(nic_dev, &port_cap))
- return;
-
- mutex_lock(&nic_cfg->cfg_mutex);
- if (nic_cfg->pause_set || !port_cap.autoneg_state) {
- nic_cfg->auto_neg = port_cap.autoneg_state;
- pause_info.auto_neg = nic_cfg->auto_neg;
- pause_info.rx_pause = nic_cfg->rx_pause;
- pause_info.tx_pause = nic_cfg->tx_pause;
- hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
- }
- mutex_unlock(&nic_cfg->cfg_mutex);
+ kfree(nic_dev->intr_coalesce);
}
-/**
- * link_status_event_handler - link event handler
- * @handle: nic device for the handler
- * @buf_in: input buffer
- * @in_size: input size
- * @buf_out: output buffer
- * @out_size: returned output size
- **/
-static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
+static int hinic_alloc_qps(struct hinic_nic_dev *nic_dev)
{
- struct hinic_port_link_status *link_status, *ret_link_status;
- struct hinic_dev *nic_dev = handle;
+ struct net_device *netdev = nic_dev->netdev;
+ int err;
- link_status = buf_in;
+ err = hinic_alloc_txqs(netdev);
+ if (err) {
+ nic_err(&nic_dev->pdev->dev, "Failed to alloc txqs\n");
+ return err;
+ }
- if (link_status->link == HINIC_LINK_STATE_UP) {
- down(&nic_dev->mgmt_lock);
+ err = hinic_alloc_rxqs(netdev);
+ if (err) {
+ nic_err(&nic_dev->pdev->dev, "Failed to alloc rxqs\n");
+ goto alloc_rxqs_err;
+ }
- nic_dev->flags |= HINIC_LINK_UP;
- nic_dev->cable_unplugged = false;
- nic_dev->module_unrecognized = false;
+ err = hinic_init_intr_coalesce(nic_dev);
+ if (err) {
+ nic_err(&nic_dev->pdev->dev, "Failed to init_intr_coalesce\n");
+ goto init_intr_err;
+ }
- if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
- (HINIC_LINK_UP | HINIC_INTF_UP)) {
- netif_carrier_on(nic_dev->netdev);
- netif_tx_wake_all_queues(nic_dev->netdev);
- }
+ return 0;
- up(&nic_dev->mgmt_lock);
+init_intr_err:
+ hinic_free_rxqs(netdev);
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- hinic_refresh_nic_cfg(nic_dev);
+alloc_rxqs_err:
+ hinic_free_txqs(netdev);
- netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n");
- } else {
- down(&nic_dev->mgmt_lock);
+ return err;
+}
- nic_dev->flags &= ~HINIC_LINK_UP;
+static void hinic_destroy_qps(struct hinic_nic_dev *nic_dev)
+{
+ hinic_free_intr_coalesce(nic_dev);
+ hinic_free_rxqs(nic_dev->netdev);
+ hinic_free_txqs(nic_dev->netdev);
+}
- netif_carrier_off(nic_dev->netdev);
- netif_tx_disable(nic_dev->netdev);
+static int hinic_validate_parameters(struct hinic_lld_dev *lld_dev)
+{
+ struct pci_dev *pdev = lld_dev->pdev;
+
+	/* Check poll_weight value, default poll_weight is 64.
+	 * The poll_weight must not exceed the max queue depth,
+	 * so the valid value range is 1~4096.
+	 */
+ if (!poll_weight) {
+ nic_warn(&pdev->dev, "Module Parameter poll_weight can not be 0, resetting to %d\n",
+ DEFAULT_POLL_WEIGHT);
+ poll_weight = DEFAULT_POLL_WEIGHT;
+ }
- up(&nic_dev->mgmt_lock);
+ if (poll_weight > HINIC_MAX_QUEUE_DEPTH) {
+ nic_warn(&pdev->dev, "Module Parameter poll_weight value %u is out of 1~%d, resetting to max value %d\n",
+ poll_weight, HINIC_MAX_QUEUE_DEPTH,
+ HINIC_MAX_QUEUE_DEPTH);
+ poll_weight = HINIC_MAX_QUEUE_DEPTH;
+ }
- netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
+	/* Check rx_buff value, default rx_buff is 2KB.
+	 * Valid rx_buff values are 2KB/4KB/8KB/16KB.
+	 */
+ if (rx_buff != RX_BUFF_VALID_2KB && rx_buff != RX_BUFF_VALID_4KB &&
+ rx_buff != RX_BUFF_VALID_8KB && rx_buff != RX_BUFF_VALID_16KB) {
+ nic_warn(&pdev->dev, "Module Parameter rx_buff value %d is out of range, must be 2^n. Valid range is 2 - 16, resetting to %dKB",
+ rx_buff, DEFAULT_RX_BUFF_LEN);
+ rx_buff = DEFAULT_RX_BUFF_LEN;
}
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- hinic_notify_all_vfs_link_changed(nic_dev->hwdev,
- link_status->link);
+ if (qp_coalesc_timer_high <= qp_coalesc_timer_low) {
+ nic_warn(&pdev->dev, "Module Parameter qp_coalesc_timer_high: %d, qp_coalesc_timer_low: %d is invalid, resetting to default\n",
+ qp_coalesc_timer_high, qp_coalesc_timer_low);
+ qp_coalesc_timer_high = HINIC_RX_COAL_TIME_HIGH;
+ qp_coalesc_timer_low = HINIC_RX_COAL_TIME_LOW;
+ }
- ret_link_status = buf_out;
- ret_link_status->status = 0;
+ if (qp_pending_limit_high <= qp_pending_limit_low) {
+ nic_warn(&pdev->dev, "Module Parameter qp_pending_limit_high: %d, qp_pending_limit_low: %d is invalid, resetting to default\n",
+ qp_pending_limit_high, qp_pending_limit_low);
+ qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH;
+ qp_pending_limit_low = HINIC_RX_PENDING_LIMIT_LOW;
+ }
- *out_size = sizeof(*ret_link_status);
+ return 0;
}
-static void cable_plug_event(void *handle,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
+static void check_lro_module_param(struct hinic_nic_dev *nic_dev)
{
- struct hinic_cable_plug_event *plug_event = buf_in;
- struct hinic_dev *nic_dev = handle;
-
- nic_dev->cable_unplugged = plug_event->plugged ? false : true;
-
- *out_size = sizeof(*plug_event);
- plug_event = buf_out;
- plug_event->status = 0;
+ struct hinic_lro_cfg *lro = &nic_dev->adaptive_cfg.lro;
+
+ /* Use module parameters first. */
+ if (set_lro_timer != 0 &&
+ set_lro_timer >= HINIC_LRO_RX_TIMER_LOWER &&
+ set_lro_timer <= HINIC_LRO_RX_TIMER_UPPER)
+ lro->timer = set_lro_timer;
+
+ /* Use module parameters first. */
+ if (set_max_wqe_num != 0 &&
+ set_max_wqe_num <= HINIC_LRO_MAX_WQE_NUM_UPPER &&
+ set_max_wqe_num >= HINIC_LRO_MAX_WQE_NUM_LOWER)
+ lro->buffer_size = set_max_wqe_num * nic_dev->rx_buff_len;
}
-static void link_err_event(void *handle,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
+static void decide_rss_cfg(struct hinic_nic_dev *nic_dev)
{
- struct hinic_link_err_event *link_err = buf_in;
- struct hinic_dev *nic_dev = handle;
+ struct hinic_environment_info *info = &nic_dev->env_info;
- if (link_err->err_type >= LINK_ERR_NUM)
- netif_info(nic_dev, link, nic_dev->netdev,
- "Link failed, Unknown error type: 0x%x\n",
- link_err->err_type);
- else
- nic_dev->module_unrecognized = true;
+ switch (info->cpu) {
+ case HINIC_CPU_ARM_GENERIC:
+ set_bit(HINIC_SAME_RXTX, &nic_dev->flags);
- *out_size = sizeof(*link_err);
- link_err = buf_out;
- link_err->status = 0;
-}
+ break;
+ case HINIC_CPU_X86_GENERIC:
+ clear_bit(HINIC_SAME_RXTX, &nic_dev->flags);
-static int set_features(struct hinic_dev *nic_dev,
- netdev_features_t pre_features,
- netdev_features_t features, bool force_change)
-{
- netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
- u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
- netdev_features_t failed_features = 0;
- int ret = 0;
- int err = 0;
+ break;
- if (changed & NETIF_F_TSO) {
- ret = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
- HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
- if (ret) {
- err = ret;
- failed_features |= NETIF_F_TSO;
- }
+ default:
+ clear_bit(HINIC_SAME_RXTX, &nic_dev->flags);
+ break;
}
+}
- if (changed & NETIF_F_RXCSUM) {
- ret = hinic_set_rx_csum_offload(nic_dev, csum_en);
- if (ret) {
- err = ret;
- failed_features |= NETIF_F_RXCSUM;
- }
- }
+static void decide_lro_cfg(struct hinic_nic_dev *nic_dev)
+{
+ struct hinic_environment_info *info = &nic_dev->env_info;
+ struct hinic_lro_cfg *lro = &nic_dev->adaptive_cfg.lro;
- if (changed & NETIF_F_LRO) {
- ret = hinic_set_rx_lro_state(nic_dev,
- !!(features & NETIF_F_LRO),
- HINIC_LRO_RX_TIMER_DEFAULT,
- HINIC_LRO_MAX_WQE_NUM_DEFAULT);
- if (ret) {
- err = ret;
- failed_features |= NETIF_F_LRO;
+ if (lro_en_status < HINIC_LRO_STATUS_UNSET) {
+ lro->enable = lro_en_status;
+ } else {
+		/* LRO is enabled by default on all Huawei OS variants */
+ switch (info->os) {
+ case HINIC_OS_HUAWEI:
+ lro->enable = 1;
+ break;
+ case HINIC_OS_NON_HUAWEI:
+ lro->enable = 0;
+ break;
+ default:
+ lro->enable = 0;
+ break;
}
}
- if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
- ret = hinic_set_rx_vlan_offload(nic_dev,
- !!(features &
- NETIF_F_HW_VLAN_CTAG_RX));
- if (ret) {
- err = ret;
- failed_features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
+ switch (info->board) {
+ case HINIC_BOARD_25GE:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_25GE;
+ break;
+ case HINIC_BOARD_100GE:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_100GE;
+ break;
+ case HINIC_BOARD_PG_TP_10GE:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_PG_10GE;
+ break;
+ case HINIC_BOARD_PG_SM_25GE:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT;
+ break;
+ case HINIC_BOARD_PG_100GE:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_PG_100GE;
+ break;
+ default:
+ lro->timer = HINIC_LRO_RX_TIMER_DEFAULT;
+ break;
}
- if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
- ret = hinic_set_vlan_fliter(nic_dev,
- !!(features &
- NETIF_F_HW_VLAN_CTAG_FILTER));
- if (ret) {
- err = ret;
- failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- }
+ /* Use module parameters first. */
+ switch (info->cpu) {
+ case HINIC_CPU_ARM_GENERIC:
+ lro->buffer_size =
+ HINIC_LRO_MAX_WQE_NUM_DEFAULT_ARM *
+ nic_dev->rx_buff_len;
+ break;
+ case HINIC_CPU_X86_GENERIC:
+ lro->buffer_size =
+ HINIC_LRO_MAX_WQE_NUM_DEFAULT_X86 *
+ nic_dev->rx_buff_len;
+ break;
+ default:
+ lro->buffer_size =
+ HINIC_LRO_MAX_WQE_NUM_DEFAULT *
+ nic_dev->rx_buff_len;
+ break;
}
- if (err) {
- nic_dev->netdev->features = features ^ failed_features;
- return -EIO;
+	/* LRO buffer_size needs adjustment according to board type */
+ switch (info->board) {
+ case HINIC_BOARD_PG_TP_10GE:
+ case HINIC_BOARD_PG_SM_25GE:
+ case HINIC_BOARD_PG_100GE:
+ lro->buffer_size =
+ HINIC_LRO_WQE_NUM_PANGEA_DEFAULT * nic_dev->rx_buff_len;
+ break;
+ default:
+ break;
}
- return 0;
+ check_lro_module_param(nic_dev);
+
+ nic_info(&nic_dev->pdev->dev,
+ "LRO default configuration: enable %u, timer %u, buffer size %u\n",
+ lro->enable, lro->timer, lro->buffer_size);
}
-static int hinic_init_intr_coalesce(struct hinic_dev *nic_dev)
+static void decide_intr_cfg(struct hinic_nic_dev *nic_dev)
{
- u64 size;
- u16 i;
-
- size = sizeof(struct hinic_intr_coal_info) * nic_dev->max_qps;
- nic_dev->rx_intr_coalesce = kzalloc(size, GFP_KERNEL);
- if (!nic_dev->rx_intr_coalesce)
- return -ENOMEM;
- nic_dev->tx_intr_coalesce = kzalloc(size, GFP_KERNEL);
- if (!nic_dev->tx_intr_coalesce) {
- kfree(nic_dev->rx_intr_coalesce);
- return -ENOMEM;
- }
-
- for (i = 0; i < nic_dev->max_qps; i++) {
- nic_dev->rx_intr_coalesce[i].pending_limt =
- HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
- nic_dev->rx_intr_coalesce[i].coalesce_timer_cfg =
- HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
- nic_dev->rx_intr_coalesce[i].resend_timer_cfg =
- HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
- nic_dev->tx_intr_coalesce[i].pending_limt =
- HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
- nic_dev->tx_intr_coalesce[i].coalesce_timer_cfg =
- HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
- nic_dev->tx_intr_coalesce[i].resend_timer_cfg =
- HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ struct pci_device_id *id;
+
+ id = hinic_get_pci_device_id(nic_dev->pdev);
+ switch (id->driver_data) {
+ case HINIC_BOARD_PG_TP_10GE:
+ case HINIC_BOARD_PG_SM_25GE:
+ case HINIC_BOARD_PG_100GE:
+ clear_bit(HINIC_INTR_ADAPT, &nic_dev->flags);
+ break;
+ default:
+ set_bit(HINIC_INTR_ADAPT, &nic_dev->flags);
+ break;
}
-
- return 0;
}
-static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev)
+static void adaptive_configuration_init(struct hinic_nic_dev *nic_dev)
{
- kfree(nic_dev->tx_intr_coalesce);
- kfree(nic_dev->rx_intr_coalesce);
+ struct pci_device_id *id;
+
+ id = hinic_get_pci_device_id(nic_dev->pdev);
+ if (id)
+ nic_dev->env_info.board = id->driver_data;
+ else
+ nic_dev->env_info.board = HINIC_BOARD_UNKNOWN;
+
+ nic_dev->env_info.os = HINIC_OS_HUAWEI;
+
+#if defined(__aarch64__)
+ nic_dev->env_info.cpu = HINIC_CPU_ARM_GENERIC;
+#elif defined(__x86_64__)
+ nic_dev->env_info.cpu = HINIC_CPU_X86_GENERIC;
+#else
+ nic_dev->env_info.cpu = HINIC_CPU_UNKNOWN;
+#endif
+
+ nic_info(&nic_dev->pdev->dev,
+ "Board type %u, OS type %u, CPU type %u\n",
+ nic_dev->env_info.board, nic_dev->env_info.os,
+ nic_dev->env_info.cpu);
+
+ decide_lro_cfg(nic_dev);
+ decide_rss_cfg(nic_dev);
+ decide_intr_cfg(nic_dev);
}
-/**
- * nic_dev_init - Initialize the NIC device
- * @pdev: the NIC pci device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int nic_dev_init(struct pci_dev *pdev)
+static int nic_probe(struct hinic_lld_dev *lld_dev, void **uld_dev,
+ char *uld_dev_name)
{
- struct hinic_rx_mode_work *rx_mode_work;
- struct hinic_dev *nic_dev;
+ struct pci_dev *pdev = lld_dev->pdev;
+ struct hinic_nic_dev *nic_dev;
struct net_device *netdev;
- struct hinic_hwdev *hwdev;
- struct devlink *devlink;
- u8 addr[ETH_ALEN];
- int err, num_qps;
-
- devlink = hinic_devlink_alloc(&pdev->dev);
- if (!devlink) {
- dev_err(&pdev->dev, "Hinic devlink alloc failed\n");
- return -ENOMEM;
- }
+ u16 max_qps;
+ u32 page_num;
+ int err;
- hwdev = hinic_init_hwdev(pdev, devlink);
- if (IS_ERR(hwdev)) {
- dev_err(&pdev->dev, "Failed to initialize HW device\n");
- hinic_devlink_free(devlink);
- return PTR_ERR(hwdev);
- }
+	/* *uld_dev should never be NULL */
+ *uld_dev = lld_dev;
- num_qps = hinic_hwdev_num_qps(hwdev);
- if (num_qps <= 0) {
- dev_err(&pdev->dev, "Invalid number of QPS\n");
- err = -EINVAL;
- goto err_num_qps;
+ if (!hinic_support_nic(lld_dev->hwdev, NULL)) {
+ nic_info(&pdev->dev, "Hw don't support nic\n");
+ return 0;
}
- netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps);
+ err = hinic_validate_parameters(lld_dev);
+ if (err)
+ return -EINVAL;
+
+ max_qps = hinic_func_max_nic_qnum(lld_dev->hwdev);
+ netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps);
if (!netdev) {
- dev_err(&pdev->dev, "Failed to allocate Ethernet device\n");
- err = -ENOMEM;
- goto err_alloc_etherdev;
+ nic_err(&pdev->dev, "Failed to allocate ETH device\n");
+ return -ENOMEM;
}
- if (!HINIC_IS_VF(hwdev->hwif))
- netdev->netdev_ops = &hinic_netdev_ops;
- else
- netdev->netdev_ops = &hinicvf_netdev_ops;
-
- netdev->max_mtu = HINIC_MAX_MTU_SIZE;
- netdev->min_mtu = HINIC_MIN_MTU_SIZE;
-
- nic_dev = netdev_priv(netdev);
- nic_dev->netdev = netdev;
- nic_dev->hwdev = hwdev;
- nic_dev->msg_enable = MSG_ENABLE_DEFAULT;
- nic_dev->flags = 0;
- nic_dev->txqs = NULL;
- nic_dev->rxqs = NULL;
- nic_dev->tx_weight = tx_weight;
- nic_dev->rx_weight = rx_weight;
- nic_dev->sq_depth = HINIC_SQ_DEPTH;
- nic_dev->rq_depth = HINIC_RQ_DEPTH;
- nic_dev->sriov_info.hwdev = hwdev;
- nic_dev->sriov_info.pdev = pdev;
- nic_dev->max_qps = num_qps;
- nic_dev->devlink = devlink;
-
- hinic_set_ethtool_ops(netdev);
-
- sema_init(&nic_dev->mgmt_lock, 1);
-
- nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID,
- GFP_KERNEL);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ nic_dev = (struct hinic_nic_dev *)netdev_priv(netdev);
+ nic_dev->hwdev = lld_dev->hwdev;
+ nic_dev->pdev = pdev;
+ nic_dev->poll_weight = (int)poll_weight;
+ nic_dev->msg_enable = DEFAULT_MSG_ENABLE;
+ nic_dev->heart_status = true;
+ nic_dev->in_vm = !hinic_is_in_host();
+ nic_dev->is_vm_slave = is_multi_vm_slave(lld_dev->hwdev);
+ nic_dev->is_bm_slave = is_multi_bm_slave(lld_dev->hwdev);
+ nic_dev->lro_replenish_thld = lro_replenish_thld;
+ nic_dev->rx_buff_len = (u16)(rx_buff * CONVERT_UNIT);
+ page_num = (RX_BUFF_NUM_PER_PAGE * nic_dev->rx_buff_len) / PAGE_SIZE;
+ nic_dev->page_order = page_num > 0 ? ilog2(page_num) : 0;
+
+ mutex_init(&nic_dev->nic_mutex);
+
+ adaptive_configuration_init(nic_dev);
+
+ nic_dev->vlan_bitmap = kzalloc(VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL);
if (!nic_dev->vlan_bitmap) {
err = -ENOMEM;
- goto err_vlan_bitmap;
+ goto vlan_bitmap_err;
}
+ nic_dev->netdev = netdev;
+ hinic_assign_netdev_ops(nic_dev);
+ netdev_feature_init(netdev);
+ /* get nic cap from hw */
+ hinic_support_nic(lld_dev->hwdev, &nic_dev->nic_cap);
- nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME);
- if (!nic_dev->workq) {
- err = -ENOMEM;
- goto err_workq;
+ err = hinic_init_nic_hwdev(nic_dev->hwdev, nic_dev->rx_buff_len);
+ if (err) {
+ nic_err(&pdev->dev, "Failed to init nic hwdev\n");
+ goto init_nic_hwdev_err;
}
- pci_set_drvdata(pdev, netdev);
-
- err = hinic_port_get_mac(nic_dev, addr);
+ err = hinic_set_super_cqe_state(nic_dev->hwdev, true);
if (err) {
- dev_err(&pdev->dev, "Failed to get mac address\n");
- goto err_get_mac;
+ nic_err(&pdev->dev, "Failed to set super cqe\n");
+ goto set_supper_cqe_err;
}
- eth_hw_addr_set(netdev, addr);
- if (!is_valid_ether_addr(netdev->dev_addr)) {
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
- dev_err(&pdev->dev, "Invalid MAC address\n");
- err = -EIO;
- goto err_add_mac;
- }
+ err = hinic_sw_init(nic_dev);
+ if (err)
+ goto sw_init_err;
- dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
- netdev->dev_addr);
- eth_hw_addr_random(netdev);
+ err = hinic_alloc_qps(nic_dev);
+ if (err) {
+ nic_err(&pdev->dev, "Failed to alloc qps\n");
+ goto alloc_qps_err;
}
- err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
- if (err && err != HINIC_PF_SET_VF_ALREADY) {
- dev_err(&pdev->dev, "Failed to add mac\n");
- goto err_add_mac;
+ nic_dev->workq = create_singlethread_workqueue(HINIC_NIC_DEV_WQ_NAME);
+ if (!nic_dev->workq) {
+ nic_err(&pdev->dev, "Failed to initialize AEQ workqueue\n");
+ err = -ENOMEM;
+ goto create_workq_err;
}
- err = hinic_port_set_mtu(nic_dev, netdev->mtu);
- if (err) {
- dev_err(&pdev->dev, "Failed to set mtu\n");
- goto err_set_mtu;
- }
+ INIT_LIST_HEAD(&nic_dev->uc_filter_list);
+ INIT_LIST_HEAD(&nic_dev->mc_filter_list);
+ INIT_WORK(&nic_dev->rx_mode_work, hinic_set_rx_mode_work);
- rx_mode_work = &nic_dev->rx_mode_work;
- INIT_WORK(&rx_mode_work->work, set_rx_mode);
+ err = hinic_set_default_hw_feature(nic_dev);
+ if (err)
+ goto set_features_err;
- netdev_features_init(netdev);
+ hinic_register_notifier(nic_dev);
- netif_carrier_off(netdev);
+ err = register_netdev(netdev);
+ if (err) {
+ nic_err(&pdev->dev, "Failed to register netdev\n");
+ err = -ENOMEM;
+ goto netdev_err;
+ }
- hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
- nic_dev, link_status_event_handler);
- hinic_hwdev_cb_register(nic_dev->hwdev,
- HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT,
- nic_dev, cable_plug_event);
- hinic_hwdev_cb_register(nic_dev->hwdev,
- HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT,
- nic_dev, link_err_event);
+ netif_carrier_off(netdev);
- err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
- if (err)
- goto err_set_features;
+ *uld_dev = nic_dev;
+ nicif_info(nic_dev, probe, netdev, "Register netdev succeed\n");
- /* enable pause and disable pfc by default */
- err = hinic_dcb_set_pfc(nic_dev->hwdev, 0, 0);
- if (err)
- goto err_set_pfc;
+ return 0;
- SET_NETDEV_DEV(netdev, &pdev->dev);
+netdev_err:
+ hinic_unregister_notifier(nic_dev);
- err = hinic_init_intr_coalesce(nic_dev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init_intr_coalesce\n");
- goto err_init_intr;
- }
+set_features_err:
+ destroy_workqueue(nic_dev->workq);
- hinic_dbg_init(nic_dev);
+create_workq_err:
+ hinic_destroy_qps(nic_dev);
- hinic_func_tbl_dbgfs_init(nic_dev);
+alloc_qps_err:
+ hinic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
+ hinic_global_func_id_hw(nic_dev->hwdev));
- err = hinic_func_table_debug_add(nic_dev);
- if (err) {
- dev_err(&pdev->dev, "Failed to add func_table debug\n");
- goto err_add_func_table_dbg;
- }
+sw_init_err:
+ (void)hinic_set_super_cqe_state(nic_dev->hwdev, false);
- err = register_netdev(netdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to register netdev\n");
- goto err_reg_netdev;
- }
+set_supper_cqe_err:
+ hinic_free_nic_hwdev(nic_dev->hwdev);
- return 0;
+init_nic_hwdev_err:
+ kfree(nic_dev->vlan_bitmap);
-err_reg_netdev:
- hinic_func_table_debug_rem(nic_dev);
-err_add_func_table_dbg:
- hinic_func_tbl_dbgfs_uninit(nic_dev);
- hinic_dbg_uninit(nic_dev);
- hinic_free_intr_coalesce(nic_dev);
-err_init_intr:
-err_set_pfc:
-err_set_features:
- hinic_hwdev_cb_unregister(nic_dev->hwdev,
- HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT);
- hinic_hwdev_cb_unregister(nic_dev->hwdev,
- HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT);
- hinic_hwdev_cb_unregister(nic_dev->hwdev,
- HINIC_MGMT_MSG_CMD_LINK_STATUS);
- cancel_work_sync(&rx_mode_work->work);
-
-err_set_mtu:
- hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
-err_add_mac:
-err_get_mac:
- pci_set_drvdata(pdev, NULL);
- destroy_workqueue(nic_dev->workq);
-err_workq:
-err_vlan_bitmap:
+vlan_bitmap_err:
free_netdev(netdev);
-err_alloc_etherdev:
-err_num_qps:
- hinic_free_hwdev(hwdev);
- hinic_devlink_free(devlink);
return err;
}
-static int hinic_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static void nic_remove(struct hinic_lld_dev *lld_dev, void *adapter)
{
- int err = pci_enable_device(pdev);
+ struct hinic_nic_dev *nic_dev = adapter;
+ struct net_device *netdev;
- if (err)
- return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n");
+ if (!nic_dev || !hinic_support_nic(lld_dev->hwdev, NULL))
+ return;
- err = pci_request_regions(pdev, HINIC_DRV_NAME);
- if (err) {
- dev_err(&pdev->dev, "Failed to request PCI regions\n");
- goto err_pci_regions;
- }
+ netdev = nic_dev->netdev;
- pci_set_master(pdev);
+ unregister_netdev(netdev);
+ hinic_unregister_notifier(nic_dev);
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "Failed to set DMA mask\n");
- goto err_dma_mask;
- }
+ cancel_work_sync(&nic_dev->rx_mode_work);
+ destroy_workqueue(nic_dev->workq);
- err = nic_dev_init(pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize NIC device\n");
- goto err_nic_dev_init;
- }
+ hinic_destroy_qps(nic_dev);
- dev_info(&pdev->dev, "HiNIC driver - probed\n");
- return 0;
+ hinic_clean_mac_list_filter(nic_dev);
+ hinic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
+ hinic_global_func_id_hw(nic_dev->hwdev));
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
+ hinic_rss_template_free(nic_dev->hwdev, nic_dev->rss_tmpl_idx);
-err_nic_dev_init:
-err_dma_mask:
- pci_release_regions(pdev);
+ (void)hinic_set_super_cqe_state(nic_dev->hwdev, false);
-err_pci_regions:
- pci_disable_device(pdev);
- return err;
+ hinic_free_nic_hwdev(nic_dev->hwdev);
+
+ kfree(nic_dev->vlan_bitmap);
+
+ free_netdev(netdev);
}
-static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
+int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev)
{
- struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
- u32 loop_cnt = 0;
+ struct net_device *netdev = nic_dev->netdev;
+ int err, err_netdev = 0;
- set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
- usleep_range(9900, 10000);
+ nicif_info(nic_dev, drv, netdev, "Start to disable RSS\n");
- while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) {
- if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
- !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
- return;
+ if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ nicif_info(nic_dev, drv, netdev, "RSS not enabled, do nothing\n");
+ return 0;
+ }
- usleep_range(9900, 10000);
- loop_cnt++;
+ if (netif_running(netdev)) {
+ err_netdev = hinic_close(netdev);
+ if (err_netdev) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to close netdev\n");
+ return -EFAULT;
+ }
}
-}
-static void hinic_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct devlink *devlink = nic_dev->devlink;
- struct hinic_rx_mode_work *rx_mode_work;
+ /* free rss template */
+ err = hinic_rss_template_free(nic_dev->hwdev, nic_dev->rss_tmpl_idx);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to free RSS template\n");
+ } else {
+ nicif_info(nic_dev, drv, netdev, "Success to free RSS template\n");
+ clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
+ }
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
- wait_sriov_cfg_complete(nic_dev);
- hinic_pci_sriov_disable(pdev);
+ if (netif_running(netdev)) {
+ err_netdev = hinic_open(netdev);
+ if (err_netdev)
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
}
- unregister_netdev(netdev);
+ return err ? err : err_netdev;
+}
- hinic_func_table_debug_rem(nic_dev);
+int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev)
+{
+ struct net_device *netdev = nic_dev->netdev;
+ int err, err_netdev = 0;
- hinic_func_tbl_dbgfs_uninit(nic_dev);
+ nicif_info(nic_dev, drv, netdev, "Start to enable RSS\n");
- hinic_dbg_uninit(nic_dev);
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ nicif_info(nic_dev, drv, netdev, "RSS already enabled, do nothing\n");
+ return 0;
+ }
- hinic_free_intr_coalesce(nic_dev);
+ if (netif_running(netdev)) {
+ err_netdev = hinic_close(netdev);
+ if (err_netdev) {
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to close netdev\n");
+ return -EFAULT;
+ }
+ }
- hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
+ err = hinic_rss_template_alloc(nic_dev->hwdev, &nic_dev->rss_tmpl_idx);
+ if (err) {
+ if (err == -ENOSPC)
+ nicif_warn(nic_dev, drv, netdev,
+ "Failed to alloc RSS template, table is full\n");
+ else
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to alloc RSS template\n");
+ } else {
+ set_bit(HINIC_RSS_ENABLE, &nic_dev->flags);
+ nicif_info(nic_dev, drv, netdev, "Success to alloc RSS template\n");
+ }
- hinic_hwdev_cb_unregister(nic_dev->hwdev,
- HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT);
- hinic_hwdev_cb_unregister(nic_dev->hwdev,
- HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT);
- hinic_hwdev_cb_unregister(nic_dev->hwdev,
- HINIC_MGMT_MSG_CMD_LINK_STATUS);
+ if (netif_running(netdev)) {
+ err_netdev = hinic_open(netdev);
+ if (err_netdev)
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ }
- rx_mode_work = &nic_dev->rx_mode_work;
- cancel_work_sync(&rx_mode_work->work);
+ return err ? err : err_netdev;
+}
- pci_set_drvdata(pdev, NULL);
+static const char *hinic_module_link_err[LINK_ERR_NUM] = {
+ "Unrecognized module",
+};
- destroy_workqueue(nic_dev->workq);
+static void hinic_port_module_event_handler(struct hinic_nic_dev *nic_dev,
+ struct hinic_event_info *event)
+{
+ enum port_module_event_type type = event->module_event.type;
+ enum link_err_type err_type = event->module_event.err_type;
+
+ switch (type) {
+ case HINIC_PORT_MODULE_CABLE_PLUGGED:
+ case HINIC_PORT_MODULE_CABLE_UNPLUGGED:
+ nicif_info(nic_dev, link, nic_dev->netdev,
+ "Port module event: Cable %s\n",
+ type == HINIC_PORT_MODULE_CABLE_PLUGGED ?
+ "plugged" : "unplugged");
+ break;
+ case HINIC_PORT_MODULE_LINK_ERR:
+ if (err_type >= LINK_ERR_NUM) {
+ nicif_info(nic_dev, link, nic_dev->netdev,
+ "Link failed, Unknown error type: 0x%x\n",
+ err_type);
+ } else {
+ nicif_info(nic_dev, link, nic_dev->netdev,
+ "Link failed, error type: 0x%x: %s\n",
+ err_type, hinic_module_link_err[err_type]);
+ }
+ break;
+ default:
+ nicif_err(nic_dev, link, nic_dev->netdev,
+ "Unknown port module type %d\n", type);
+ break;
+ }
+}
- hinic_free_hwdev(nic_dev->hwdev);
+static void hinic_intr_coalesc_change(struct hinic_nic_dev *nic_dev,
+ struct hinic_event_info *event)
+{
+ u32 hw_to_os_speed[LINK_SPEED_LEVELS] = {SPEED_10, SPEED_100,
+ SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000,
+ SPEED_100000};
+ u8 qid, coalesc_timer_cfg, pending_limt;
+ struct pci_device_id *id;
+ u32 speed;
+ int err;
- free_netdev(netdev);
+ if (nic_dev->adaptive_rx_coal)
+ return;
- hinic_devlink_free(devlink);
+ speed = hw_to_os_speed[event->link_info.speed];
+ if (speed == nic_dev->his_link_speed)
+ return;
- pci_release_regions(pdev);
- pci_disable_device(pdev);
+ id = hinic_get_pci_device_id(nic_dev->pdev);
+ switch (id->driver_data) {
+ case HINIC_BOARD_PG_TP_10GE:
+ return;
+ case HINIC_BOARD_PG_SM_25GE:
+ if (speed == SPEED_10000) {
+ pending_limt =
+ HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT;
+ coalesc_timer_cfg =
+ HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER;
+ } else if (speed == SPEED_25000) {
+ pending_limt =
+ HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT;
+ coalesc_timer_cfg =
+ HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER;
+ } else {
+ pending_limt =
+ HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT;
+ coalesc_timer_cfg =
+ HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER;
+ }
+ break;
+ case HINIC_BOARD_PG_100GE:
+ return;
+ default:
+ return;
+ }
- dev_info(&pdev->dev, "HiNIC driver - removed\n");
-}
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ if (!nic_dev->intr_coalesce[qid].user_set_intr_coal_flag) {
+ err = set_interrupt_moder(nic_dev, qid,
+ coalesc_timer_cfg,
+ pending_limt);
+ if (!err) {
+ nic_dev->intr_coalesce[qid].pending_limt =
+ pending_limt;
+ nic_dev->intr_coalesce[qid].coalesce_timer_cfg =
+ coalesc_timer_cfg;
+ }
+ }
+ }
-static void hinic_shutdown(struct pci_dev *pdev)
-{
- pci_disable_device(pdev);
+ nic_dev->his_link_speed = speed;
}
-static const struct pci_device_id hinic_pci_table[] = {
- { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
- { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
- { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
- { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
- { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_VF), 0},
- { 0, 0}
-};
-MODULE_DEVICE_TABLE(pci, hinic_pci_table);
-
-static struct pci_driver hinic_driver = {
- .name = HINIC_DRV_NAME,
- .id_table = hinic_pci_table,
- .probe = hinic_probe,
- .remove = hinic_remove,
- .shutdown = hinic_shutdown,
- .sriov_configure = hinic_pci_sriov_configure,
-};
-
-static int __init hinic_module_init(void)
+void nic_event(struct hinic_lld_dev *lld_dev, void *adapter,
+ struct hinic_event_info *event)
{
- int ret;
+ struct hinic_nic_dev *nic_dev = adapter;
+ struct net_device *netdev;
+ enum hinic_event_type type;
- hinic_dbg_register_debugfs(HINIC_DRV_NAME);
+ if (!nic_dev || !event || !hinic_support_nic(lld_dev->hwdev, NULL))
+ return;
- ret = pci_register_driver(&hinic_driver);
- if (ret)
- hinic_dbg_unregister_debugfs();
+ netdev = nic_dev->netdev;
+ type = event->type;
+
+ switch (type) {
+ case HINIC_EVENT_LINK_DOWN:
+ hinic_link_status_change(nic_dev, false);
+ break;
+ case HINIC_EVENT_LINK_UP:
+ hinic_link_status_change(nic_dev, true);
+ hinic_intr_coalesc_change(nic_dev, event);
+ break;
+ case HINIC_EVENT_HEART_LOST:
+ hinic_heart_lost(nic_dev);
+ hinic_link_status_change(nic_dev, false);
+ break;
+ case HINIC_EVENT_FAULT:
+ if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR &&
+ event->info.event.chip.func_id ==
+ hinic_global_func_id(lld_dev->hwdev))
+ hinic_link_status_change(nic_dev, false);
+ break;
+ case HINIC_EVENT_DCB_STATE_CHANGE:
+ if (nic_dev->default_cos_id == event->dcb_state.default_cos)
+ break;
- return ret;
-}
+		/* PF notifies the VF; the VF does not need to handle this event */
+ if (!HINIC_FUNC_IS_VF(nic_dev->hwdev))
+ break;
-static void __exit hinic_module_exit(void)
-{
- pci_unregister_driver(&hinic_driver);
- hinic_dbg_unregister_debugfs();
+ nicif_info(nic_dev, drv, netdev, "Change default cos %d to %d\n",
+ nic_dev->default_cos_id,
+ event->dcb_state.default_cos);
+
+ nic_dev->default_cos_id = event->dcb_state.default_cos;
+ hinic_set_sq_default_cos(netdev, nic_dev->default_cos_id);
+ break;
+ case HINIC_EVENT_PORT_MODULE_EVENT:
+ hinic_port_module_event_handler(nic_dev, event);
+ break;
+ case HINIC_EVENT_MGMT_WATCHDOG_EVENT:
+ hinic_link_status_change(nic_dev, false);
+ break;
+ default:
+ break;
+ }
}
-module_init(hinic_module_init);
-module_exit(hinic_module_exit);
+struct hinic_uld_info nic_uld_info = {
+ .probe = nic_probe,
+ .remove = nic_remove,
+ .suspend = NULL,
+ .resume = NULL,
+ .event = nic_event,
+ .ioctl = nic_ioctl,
+}; /*lint -e766*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c
similarity index 58%
rename from drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
rename to drivers/net/ethernet/huawei/hinic/hinic_mbox.c
index 3f9c31d29215..aad269fe5d1a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c
@@ -1,7 +1,20 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
@@ -10,17 +23,20 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include "hinic_hw_if.h"
+#include "ossl_knl.h"
+#include "hinic_hw.h"
#include "hinic_hw_mgmt.h"
-#include "hinic_hw_csr.h"
-#include "hinic_hw_dev.h"
-#include "hinic_hw_mbox.h"
+#include "hinic_hwdev.h"
+#include "hinic_csr.h"
+#include "hinic_hwif.h"
+#include "hinic_eqs.h"
+#include "hinic_mbox.h"
#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0
#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10
#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12
#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14
-/* The size of data to be sended (unit of 4 bytes) */
+/* The size of data to be sent (unit of 4 bytes) */
#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20
/* SO_RO(strong order, relax order) */
#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25
@@ -36,14 +52,13 @@
#define HINIC_MBOX_INT_SET(val, field) \
(((val) & HINIC_MBOX_INT_##field##_MASK) << \
- HINIC_MBOX_INT_##field##_SHIFT)
+ HINIC_MBOX_INT_##field##_SHIFT)
enum hinic_mbox_tx_status {
TX_NOT_DONE = 1,
};
#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0
-
/* specifies the issue request for the message data.
* 0 - Tx request is done;
* 1 - Tx request is in process.
@@ -55,7 +70,7 @@ enum hinic_mbox_tx_status {
#define HINIC_MBOX_CTRL_SET(val, field) \
(((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
- HINIC_MBOX_CTRL_##field##_SHIFT)
+ HINIC_MBOX_CTRL_##field##_SHIFT)
#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0
#define HINIC_MBOX_HEADER_MODULE_SHIFT 11
@@ -63,7 +78,6 @@ enum hinic_mbox_tx_status {
#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22
#define HINIC_MBOX_HEADER_SEQID_SHIFT 24
#define HINIC_MBOX_HEADER_LAST_SHIFT 30
-
/* specifies the mailbox message direction
* 0 - send
* 1 - receive
@@ -88,10 +102,10 @@ enum hinic_mbox_tx_status {
#define HINIC_MBOX_HEADER_GET(val, field) \
(((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
- HINIC_MBOX_HEADER_##field##_MASK)
+ HINIC_MBOX_HEADER_##field##_MASK)
#define HINIC_MBOX_HEADER_SET(val, field) \
((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
- HINIC_MBOX_HEADER_##field##_SHIFT)
+ HINIC_MBOX_HEADER_##field##_SHIFT)
#define MBOX_SEGLEN_MASK \
HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)
@@ -99,6 +113,7 @@ enum hinic_mbox_tx_status {
#define HINIC_MBOX_SEG_LEN 48
#define HINIC_MBOX_COMP_TIME 8000U
#define MBOX_MSG_POLLING_TIMEOUT 8000
+#define MBOX_MSG_RETRY_ACK_TIMEOUT 1000
#define HINIC_MBOX_DATA_SIZE 2040
@@ -117,6 +132,7 @@ enum hinic_mbox_tx_status {
#define MBOX_WB_STATUS_MASK 0xFF
#define MBOX_WB_ERROR_CODE_MASK 0xFF00
#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
#define MBOX_WB_STATUS_NOT_FINISHED 0x00
#define MBOX_STATUS_FINISHED(wb) \
@@ -128,9 +144,14 @@ enum hinic_mbox_tx_status {
#define SEQ_ID_START_VAL 0
#define SEQ_ID_MAX_VAL 42
+#define MBOX_LAST_SEG_MAX_LEN (MBOX_MAX_BUF_SZ - \
+ SEQ_ID_MAX_VAL * MBOX_SEG_LEN)
+#define DST_AEQ_IDX_DEFAULT_VAL 0
+#define SRC_AEQ_IDX_DEFAULT_VAL 0
#define NO_DMA_ATTRIBUTE_VAL 0
+#define HINIC_MGMT_RSP_AEQN 0
#define HINIC_MBOX_RSP_AEQN 2
#define HINIC_MBOX_RECV_AEQN 0
@@ -138,33 +159,24 @@ enum hinic_mbox_tx_status {
#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ)
#define MBOX_AREA(hwif) \
- ((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)
+ ((hwif)->cfg_regs_base + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)
#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
+#define MBOX_RESPONSE_ERROR 0x1
#define MBOX_MSG_ID_MASK 0xFF
#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
-#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
- (MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)
+#define MBOX_MSG_ID_INC(func_to_func) (MBOX_MSG_ID(func_to_func) = \
+ (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK)
#define FUNC_ID_OFF_SET_8B 8
-
-/* max message counter wait to process for one function */
-#define HINIC_MAX_MSG_CNT_TO_PROCESS 10
-
-#define HINIC_QUEUE_MIN_DEPTH 6
-#define HINIC_QUEUE_MAX_DEPTH 12
-#define HINIC_MAX_RX_BUFFER_SIZE 15
+#define FUNC_ID_OFF_SET_10B 10
enum hinic_hwif_direction_type {
HINIC_HWIF_DIRECT_SEND = 0,
HINIC_HWIF_RESPONSE = 1,
};
-enum mbox_send_mod {
- MBOX_SEND_MSG_INT,
-};
-
enum mbox_seg_type {
NOT_LAST_SEG,
LAST_SEG,
@@ -183,23 +195,34 @@ enum mbox_aeq_trig_type {
TRIGGER,
};
+struct hinic_set_random_id {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 vf_in_pf;
+ u8 rsvd1;
+ u16 func_idx;
+ u32 random_id;
+};
+
static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx,
const void *buf_in, u16 in_size, u16 offset)
{
u16 func_idx;
if (in_size < offset + sizeof(func_idx)) {
- dev_warn(&hwdev->hwif->pdev->dev,
- "Receive mailbox msg len: %d less than %d Bytes is invalid\n",
- in_size, offset);
+ sdk_warn(hwdev->dev_hdl,
			 "Receive mailbox msg len: %d less than %zu Bytes is invalid\n",
+ in_size, offset + sizeof(func_idx));
return false;
}
func_idx = *((u16 *)((u8 *)buf_in + offset));
if (src_func_idx != func_idx) {
- dev_warn(&hwdev->hwif->pdev->dev,
- "Receive mailbox function id: 0x%x not equal to msg function id: 0x%x\n",
+ sdk_warn(hwdev->dev_hdl,
			 "Receive mailbox function id(0x%x) not equal to msg function id(0x%x)\n",
src_func_idx, func_idx);
return false;
}
@@ -214,6 +237,43 @@ bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
FUNC_ID_OFF_SET_8B);
}
+bool hinic_mbox_check_func_id_10B(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size)
+{
+ return check_func_id(hwdev, func_idx, buf_in, in_size,
+ FUNC_ID_OFF_SET_10B);
+}
+
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, void *msg,
+ u16 msg_len, u16 dst_func,
+ enum hinic_hwif_direction_type direction,
+ enum hinic_mbox_ack_type ack_type,
+ struct mbox_msg_info *msg_info);
+
+/**
+ * hinic_register_ppf_mbox_cb - register mbox callback for ppf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_ppf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_ppf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->ppf_mbox_cb[mod] = callback;
+
+ set_bit(HINIC_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]);
+
+ return 0;
+}
+
/**
* hinic_register_pf_mbox_cb - register mbox callback for pf
* @hwdev: the pointer to hw device
@@ -261,9 +321,52 @@ int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
}
/**
- * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
- * @hwdev: the pointer to hw device
- * @mod: specific mod that the callback will handle
+ * hinic_register_ppf_to_pf_mbox_cb - register mbox callback for pf from ppf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_recv_from_ppf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->pf_recv_from_ppf_mbox_cb[mod] = callback;
+
+ set_bit(HINIC_PPF_TO_PF_MBOX_CB_REG,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+ return 0;
+}
+
+/**
+ * hinic_unregister_ppf_mbox_cb - unregister the mbox callback for ppf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_ppf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ clear_bit(HINIC_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_PPF_MBOX_CB_RUNNING,
+ &func_to_func->ppf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ func_to_func->ppf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
*/
void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
enum hinic_mod_type mod)
@@ -281,8 +384,8 @@ void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
/**
* hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
- * @hwdev: the pointer to hw device
- * @mod: specific mod that the callback will handle
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
*/
void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
enum hinic_mod_type mod)
@@ -298,16 +401,45 @@ void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
func_to_func->vf_mbox_cb[mod] = NULL;
}
+/**
+ * hinic_unregister_ppf_to_pf_mbox_cb - unregister the mbox callback for pf from ppf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ clear_bit(HINIC_PPF_TO_PF_MBOX_CB_REG,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNIG,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ func_to_func->pf_recv_from_ppf_mbox_cb[mod] = NULL;
+}
+
+int vf_to_pf_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_mbox_func_to_func *func_to_func = handle;
+
+	sdk_warn(func_to_func->hwdev->dev_hdl, "Not support vf command yet\n");
+ return -EFAULT;
+}
+
static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
struct hinic_recv_mbox *recv_mbox,
void *buf_out, u16 *out_size)
{
hinic_vf_mbox_cb cb;
- int ret = 0;
+ int ret;
if (recv_mbox->mod >= HINIC_MOD_MAX) {
- dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
- recv_mbox->mod);
+ sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
return -EINVAL;
}
@@ -317,10 +449,10 @@ static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
&func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
- cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
- recv_mbox->mbox_len, buf_out, out_size);
+ ret = cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
+ recv_mbox->mbox_len, buf_out, out_size);
} else {
- dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n");
+ sdk_warn(func_to_func->hwdev->dev_hdl, "VF mbox cb is not registered\n");
ret = -EINVAL;
}
@@ -330,6 +462,76 @@ static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
return ret;
}
+static int
+recv_pf_from_ppf_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ void *buf_out, u16 *out_size)
+{
+ hinic_pf_recv_from_ppf_mbox_cb cb;
+ enum hinic_mod_type mod = recv_mbox->mod;
+ int ret;
+
+ if (mod >= HINIC_MOD_MAX) {
+ sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n",
+ mod);
+ return -EINVAL;
+ }
+
+ set_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNIG,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+ cb = func_to_func->pf_recv_from_ppf_mbox_cb[mod];
+ if (cb && test_bit(HINIC_PPF_TO_PF_MBOX_CB_REG,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod])) {
+ ret = cb(func_to_func->hwdev, recv_mbox->cmd,
+ recv_mbox->mbox, recv_mbox->mbox_len,
+ buf_out, out_size);
+ } else {
+		sdk_warn(func_to_func->hwdev->dev_hdl, "PF receive ppf mailbox callback is not registered\n");
+ ret = -EINVAL;
+ }
+
+ clear_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNIG,
+ &func_to_func->ppf_to_pf_mbox_cb_state[mod]);
+
+ return ret;
+}
+
+static int recv_ppf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u8 pf_id, void *buf_out, u16 *out_size)
+{
+ hinic_ppf_mbox_cb cb;
+ u16 vf_id = 0;
+ int ret;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+ sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ set_bit(HINIC_PPF_MBOX_CB_RUNNING,
+ &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->ppf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_PPF_MBOX_CB_REG,
+ &func_to_func->ppf_mbox_cb_state[recv_mbox->mod])) {
+ ret = cb(func_to_func->hwdev, pf_id, vf_id, recv_mbox->cmd,
+ recv_mbox->mbox, recv_mbox->mbox_len,
+ buf_out, out_size);
+ } else {
+ sdk_warn(func_to_func->hwdev->dev_hdl, "PPF mbox cb is not registered, mod = %d\n",
+ recv_mbox->mod);
+ ret = -EINVAL;
+ }
+
+ clear_bit(HINIC_PPF_MBOX_CB_RUNNING,
+ &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
+}
+
static int
recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
struct hinic_recv_mbox *recv_mbox,
@@ -341,8 +543,8 @@ recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
int ret;
if (recv_mbox->mod >= HINIC_MOD_MAX) {
- dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
- recv_mbox->mod);
+ sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
return -EINVAL;
}
@@ -353,13 +555,13 @@ recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
&func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
vf_id = src_func_idx -
- hinic_glb_pf_vf_offset(func_to_func->hwif);
+ hinic_glb_pf_vf_offset(func_to_func->hwdev);
ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
recv_mbox->mbox, recv_mbox->mbox_len,
buf_out, out_size);
} else {
- dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n",
- recv_mbox->mod);
+ sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not registered\n",
+ recv_mbox->mod);
ret = -EINVAL;
}
@@ -369,11 +571,102 @@ recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
return ret;
}
+bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
+ struct vf_cmd_check_handle *cmd_handle,
+ u16 vf_id, u8 cmd, void *buf_in, u16 in_size,
+ u8 size)
+{
+ u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev);
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (cmd == cmd_handle[i].cmd) {
+ if (cmd_handle[i].check_cmd)
+ return cmd_handle[i].check_cmd(hwdev, src_idx,
+ buf_in, in_size);
+ else
+ return true;
+ }
+ }
+
+ sdk_err(hwdev->dev_hdl, "Unsupported vf cmd %d\n", cmd);
+
+ return false;
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx)
+{
+ struct hinic_hwdev *dev = func_to_func->hwdev;
+ struct mbox_msg_info msg_info = {0};
+ u16 out_size = MBOX_MAX_BUF_SZ;
+ void *buf_out = recv_mbox->buf_out;
+ int err = 0;
+
+ if (HINIC_IS_VF(dev)) {
+ err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
+ &out_size);
+ } else { /* pf/ppf process */
+
+ if (IS_PF_OR_PPF_SRC(src_func_idx)) {
+ if (HINIC_IS_PPF(dev)) {
+ err = recv_ppf_mbox_handler(func_to_func,
+ recv_mbox,
+ (u8)src_func_idx,
+ buf_out, &out_size);
+ if (err)
+ goto out;
+ } else {
+ err = recv_pf_from_ppf_handler(func_to_func,
+ recv_mbox,
+ buf_out,
+ &out_size);
+ if (err)
+ goto out;
+ }
+ /* The source is neither PF nor PPF, so it is from VF */
+ } else {
+ err = recv_pf_from_vf_mbox_handler(func_to_func,
+ recv_mbox,
+ src_func_idx,
+ buf_out, &out_size);
+ }
+ }
+
+out:
+ if (recv_mbox->ack_type == MBOX_ACK) {
+ msg_info.msg_id = recv_mbox->msg_info.msg_id;
+ if (err == HINIC_DEV_BUSY_ACTIVE_FW ||
+ err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
+ else if (err == HINIC_MBOX_VF_CMD_ERROR)
+ msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
+ else if (err)
+ msg_info.status = HINIC_MBOX_PF_SEND_ERR;
+
+ /* if not data need to response, set out_size to 1 */
+ if (!out_size || err)
+ out_size = MBOX_MSG_NO_DATA_LEN;
+
+ send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
+ buf_out, out_size, src_func_idx,
+ HINIC_HWIF_RESPONSE, MBOX_ACK,
+ &msg_info);
+ }
+
+ kfree(recv_mbox->buf_out);
+ kfree(recv_mbox->mbox);
+ kfree(recv_mbox);
+}
+
static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
u8 seq_id, u8 seg_len)
{
if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
return false;
+ else if (seq_id == SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN)
+ return false;
if (seq_id == 0) {
recv_mbox->seq_id = seq_id;
@@ -395,51 +688,41 @@ static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
func_to_func->event_flag == EVENT_START)
complete(&recv_mbox->recv_done);
else
- dev_err(&func_to_func->hwif->pdev->dev,
+ sdk_err(func_to_func->hwdev->dev_hdl,
"Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
recv_mbox->msg_info.status);
spin_unlock(&func_to_func->mbox_lock);
}
-static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
- struct hinic_recv_mbox *recv_mbox,
- u16 src_func_idx);
-
static void recv_func_mbox_work_handler(struct work_struct *work)
{
struct hinic_mbox_work *mbox_work =
container_of(work, struct hinic_mbox_work, work);
- struct hinic_recv_mbox *recv_mbox;
recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
mbox_work->src_func_idx);
- recv_mbox =
- &mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];
-
- atomic_dec(&recv_mbox->msg_cnt);
-
kfree(mbox_work);
}
static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
void *header, struct hinic_recv_mbox *recv_mbox)
{
+ u64 mbox_header = *((u64 *)header);
void *mbox_body = MBOX_BODY_FROM_HDR(header);
struct hinic_recv_mbox *rcv_mbox_temp = NULL;
- u64 mbox_header = *((u64 *)header);
- struct hinic_mbox_work *mbox_work;
- u8 seq_id, seg_len;
u16 src_func_idx;
+ struct hinic_mbox_work *mbox_work;
int pos;
+ u8 seq_id, seg_len;
seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
- dev_err(&func_to_func->hwif->pdev->dev,
+ sdk_err(func_to_func->hwdev->dev_hdl,
"Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
recv_mbox->seq_id = SEQ_ID_MAX_VAL;
@@ -467,76 +750,72 @@ static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
return;
}
- if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
- dev_warn(&func_to_func->hwif->pdev->dev,
- "This function(%u) have %d message wait to process,can't add to work queue\n",
- src_func_idx, atomic_read(&recv_mbox->msg_cnt));
- return;
- }
-
- rcv_mbox_temp = kmemdup(recv_mbox, sizeof(*rcv_mbox_temp), GFP_KERNEL);
+ rcv_mbox_temp = kzalloc(sizeof(*rcv_mbox_temp), GFP_KERNEL);
if (!rcv_mbox_temp)
return;
- rcv_mbox_temp->mbox = kmemdup(recv_mbox->mbox, MBOX_MAX_BUF_SZ,
- GFP_KERNEL);
+ memcpy(rcv_mbox_temp, recv_mbox, sizeof(*rcv_mbox_temp));
+
+ rcv_mbox_temp->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
if (!rcv_mbox_temp->mbox)
- goto err_alloc_rcv_mbox_msg;
+ goto rcv_mbox_msg_err;
+
+ memcpy(rcv_mbox_temp->mbox, recv_mbox->mbox, MBOX_MAX_BUF_SZ);
rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
- if (!rcv_mbox_temp->buf_out)
- goto err_alloc_rcv_mbox_buf;
+ if (!rcv_mbox_temp->buf_out) {
+ sdk_err(func_to_func->hwdev->dev_hdl, "Allocate receive mbox out buffer memory failed.\n");
+ goto rcv_mbox_buf_err;
+ }
mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
- if (!mbox_work)
- goto err_alloc_mbox_work;
+ if (!mbox_work) {
+ sdk_err(func_to_func->hwdev->dev_hdl, "Allocate mbox work memory failed.\n");
+ goto mbox_work_err;
+ }
mbox_work->func_to_func = func_to_func;
mbox_work->recv_mbox = rcv_mbox_temp;
- mbox_work->src_func_idx = src_func_idx;
- atomic_inc(&recv_mbox->msg_cnt);
+ mbox_work->src_func_idx = src_func_idx;
INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
queue_work(func_to_func->workq, &mbox_work->work);
return;
-err_alloc_mbox_work:
+mbox_work_err:
kfree(rcv_mbox_temp->buf_out);
-err_alloc_rcv_mbox_buf:
+rcv_mbox_buf_err:
kfree(rcv_mbox_temp->mbox);
-err_alloc_rcv_mbox_msg:
+rcv_mbox_msg_err:
kfree(rcv_mbox_temp);
}
-static int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id)
+int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id)
{
struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
struct hinic_set_random_id rand_info = {0};
u16 out_size = sizeof(rand_info);
- struct hinic_pfhwdev *pfhwdev;
int ret;
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
rand_info.version = HINIC_CMD_VER_FUNC_ID;
rand_info.func_idx = func_id;
- rand_info.vf_in_pf = func_id - hinic_glb_pf_vf_offset(hwdev->hwif);
- rand_info.random_id = get_random_u32();
+ rand_info.vf_in_pf = (u8)(func_id - hinic_glb_pf_vf_offset(hwdev));
+ get_random_bytes(&rand_info.random_id, sizeof(u32));
func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id;
- ret = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_MGMT_CMD_SET_VF_RANDOM_ID,
- &rand_info, sizeof(rand_info),
- &rand_info, &out_size, HINIC_MGMT_MSG_SYNC);
+ ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+ HINIC_MGMT_CMD_SET_VF_RANDOM_ID,
+ &rand_info, sizeof(rand_info),
+ &rand_info, &out_size, 0);
if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
rand_info.status) || !out_size || ret) {
- dev_err(&hwdev->hwif->pdev->dev, "Set VF random id failed, err: %d, status: 0x%x, out size: 0x%x\n",
+ sdk_err(hwdev->dev_hdl, "Failed to set vf random id, err: %d, status: 0x%x, out size: 0x%x\n",
ret, rand_info.status, out_size);
- return -EIO;
+ return -EINVAL;
}
if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED)
@@ -552,42 +831,40 @@ static void update_random_id_work_handler(struct work_struct *work)
{
struct hinic_mbox_work *mbox_work =
container_of(work, struct hinic_mbox_work, work);
- struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_mbox_func_to_func *func_to_func = mbox_work->func_to_func;
u16 src = mbox_work->src_func_idx;
+ int err;
- func_to_func = mbox_work->func_to_func;
-
- if (set_vf_mbox_random_id(func_to_func->hwdev, src))
- dev_warn(&func_to_func->hwdev->hwif->pdev->dev, "Update VF id: 0x%x random id failed\n",
+ err = set_vf_mbox_random_id(func_to_func->hwdev, src);
+ if (err)
+ sdk_warn(func_to_func->hwdev->dev_hdl, "Update vf id(0x%x) random id fail\n",
mbox_work->src_func_idx);
kfree(mbox_work);
}
-static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func,
- u8 *header)
+bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func,
+ u8 *header)
{
struct hinic_hwdev *hwdev = func_to_func->hwdev;
- struct hinic_mbox_work *mbox_work = NULL;
u64 mbox_header = *((u64 *)header);
- u16 offset, src;
+ struct hinic_mbox_work *mbox_work;
u32 random_id;
+ u16 offset, src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
int vf_in_pf;
- src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
-
if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random)
return true;
- if (!HINIC_IS_PPF(hwdev->hwif)) {
- offset = hinic_glb_pf_vf_offset(hwdev->hwif);
+ if (!HINIC_IS_PPF(hwdev)) {
+ offset = hinic_glb_pf_vf_offset(hwdev);
vf_in_pf = src - offset;
- if (vf_in_pf < 1 || vf_in_pf > hwdev->nic_cap.max_vf) {
- dev_warn(&hwdev->hwif->pdev->dev,
+ if (vf_in_pf < 1 || vf_in_pf > hinic_func_max_vf(hwdev)) {
+ sdk_warn(hwdev->dev_hdl,
"Receive vf id(0x%x) is invalid, vf id should be from 0x%x to 0x%x\n",
- src, offset + 1,
- hwdev->nic_cap.max_vf + offset);
+ src, (offset + 1),
+ (hinic_func_max_vf(hwdev) + offset));
return false;
}
}
@@ -599,9 +876,9 @@ static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func
random_id == func_to_func->vf_mbx_old_rand_id[src])
return true;
- dev_warn(&hwdev->hwif->pdev->dev,
- "The mailbox random id(0x%x) of func_id(0x%x) doesn't match with pf reservation(0x%x)\n",
- random_id, src, func_to_func->vf_mbx_rand_id[src]);
+ sdk_warn(hwdev->dev_hdl,
+ "Receive func_id(0x%x) mailbox random id(0x%x) mismatch with pf reserve(0x%x)\n",
+ src, random_id, func_to_func->vf_mbx_rand_id[src]);
mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
if (!mbox_work)
@@ -616,11 +893,11 @@ static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func
return false;
}
-static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
+void hinic_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
- u64 mbox_header = *((u64 *)header);
struct hinic_recv_mbox *recv_mbox;
+ u64 mbox_header = *((u64 *)header);
u64 src, dir;
func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
@@ -629,8 +906,9 @@ static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
if (src >= HINIC_MAX_FUNCTIONS) {
- dev_err(&func_to_func->hwif->pdev->dev,
- "Mailbox source function id:%u is invalid\n", (u32)src);
+ sdk_err(func_to_func->hwdev->dev_hdl,
+ "Mailbox source function id: %u is invalid\n",
+ (u32)src);
return;
}
@@ -644,7 +922,7 @@ static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
}
-static void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
+void hinic_mbox_self_aeqe_handler(void *handle, u8 *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
struct hinic_send_mbox *send_mbox;
@@ -666,8 +944,8 @@ static void clear_mbox_status(struct hinic_send_mbox *mbox)
static void mbox_copy_header(struct hinic_hwdev *hwdev,
struct hinic_send_mbox *mbox, u64 *header)
{
- u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
u32 *data = (u32 *)header;
+ u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
for (i = 0; i < idx_max; i++)
__raw_writel(*(data + i), mbox->data + i * sizeof(u32));
@@ -677,10 +955,10 @@ static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
struct hinic_send_mbox *mbox, void *seg,
u16 seg_len)
{
- u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
- u32 data_len, chk_sz = sizeof(u32);
u32 *data = seg;
+ u32 data_len, chk_sz = sizeof(u32);
u32 i, idx_max;
+ u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
/* The mbox message should be aligned in 4 bytes. */
if (seg_len % chk_sz) {
@@ -691,16 +969,16 @@ static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
data_len = seg_len;
idx_max = ALIGN(data_len, chk_sz) / chk_sz;
- for (i = 0; i < idx_max; i++)
+ for (i = 0; i < idx_max; i++) {
__raw_writel(*(data + i),
mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
+ }
}
static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
- u16 dst_func, u16 dst_aeqn, u16 seg_len,
- int poll)
+ u16 dst_func, u16 dst_aeqn, u16 rsp_aeq,
+ u16 seg_len, int poll)
{
- u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
u32 mbox_int, mbox_ctrl;
mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
@@ -713,10 +991,10 @@ static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);
- hinic_hwif_write_reg(func_to_func->hwif,
+ hinic_hwif_write_reg(func_to_func->hwdev->hwif,
HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);
- wmb(); /* writing the mbox int attributes */
+ wmb(); /* writing the mbox int attributes */
mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);
if (poll)
@@ -724,22 +1002,20 @@ static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
else
mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);
- hinic_hwif_write_reg(func_to_func->hwif,
+ hinic_hwif_write_reg(func_to_func->hwdev->hwif,
HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
}
-static void dump_mox_reg(struct hinic_hwdev *hwdev)
+void dump_mox_reg(struct hinic_hwdev *hwdev)
{
u32 val;
val = hinic_hwif_read_reg(hwdev->hwif,
HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
- dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val);
-
+ sdk_err(hwdev->dev_hdl, "Mailbox control reg: 0x%x\n", val);
val = hinic_hwif_read_reg(hwdev->hwif,
HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
- dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n",
- val);
+ sdk_err(hwdev->dev_hdl, "Mailbox interrupt offset: 0x%x\n", val);
}
static u16 get_mbox_status(struct hinic_send_mbox *mbox)
@@ -752,43 +1028,34 @@ static u16 get_mbox_status(struct hinic_send_mbox *mbox)
return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
}
-static int
-wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
- int poll, u16 *wb_status)
+static u16 mbox_msg_ack_aeqn(struct hinic_hwdev *hwdev,
+ enum hinic_hwif_direction_type seq_dir)
{
- struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
- struct hinic_hwdev *hwdev = func_to_func->hwdev;
- struct completion *done = &send_mbox->send_done;
- u32 cnt = 0;
- unsigned long jif;
+ u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
+ u16 dst_aeqn;
- if (poll) {
- while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
- *wb_status = get_mbox_status(send_mbox);
- if (MBOX_STATUS_FINISHED(*wb_status))
- break;
+ if (num_aeqs >= HINIC_HW_MAX_AEQS)
+ dst_aeqn = HINIC_MBOX_RSP_AEQN;
+ else
+ dst_aeqn = 0;
- usleep_range(900, 1000);
- cnt++;
- }
+ return dst_aeqn;
+}
- if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
- dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
- *wb_status);
- dump_mox_reg(hwdev);
- return -ETIMEDOUT;
- }
- } else {
- jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
- if (!wait_for_completion_timeout(done, jif)) {
- dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
- dump_mox_reg(hwdev);
- hinic_dump_aeq_info(hwdev);
- return -ETIMEDOUT;
- }
+static int mbox_retry_get_ack(struct hinic_mbox_func_to_func *func_to_func,
+ struct completion *done, u16 aeq_id)
+{
+ ulong timeo = msecs_to_jiffies(MBOX_MSG_RETRY_ACK_TIMEOUT);
+ int err;
- *wb_status = get_mbox_status(send_mbox);
- }
+ init_completion(done);
+
+ err = hinic_reschedule_eq(func_to_func->hwdev, HINIC_AEQ, aeq_id);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(done, timeo))
+ return -ETIMEDOUT;
return 0;
}
@@ -798,18 +1065,22 @@ static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
int poll, void *msg_info)
{
struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
- u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
struct hinic_hwdev *hwdev = func_to_func->hwdev;
- struct completion *done = &send_mbox->send_done;
u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
- u16 dst_aeqn, wb_status = 0, errcode;
+ u16 dst_aeqn, wb_status = 0, errcode, rsp_aeq;
+ u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
+ struct completion *done = &send_mbox->send_done;
+ ulong jif;
+ u32 cnt = 0;
- if (num_aeqs >= 4)
+ if (num_aeqs >= HINIC_HW_MAX_AEQS)
dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
else
dst_aeqn = 0;
+ rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
+
if (!poll)
init_completion(done);
@@ -819,15 +1090,42 @@ static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
- write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);
+ write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, rsp_aeq,
+ seg_len, poll);
- wmb(); /* writing the mbox msg attributes */
+ wmb(); /* writing the mbox msg attributes */
- if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status))
- return -ETIMEDOUT;
+ if (poll) {
+ while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
+ wb_status = get_mbox_status(send_mbox);
+ if (MBOX_STATUS_FINISHED(wb_status))
+ break;
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
+ sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout, wb status: 0x%x\n",
+ wb_status);
+ dump_mox_reg(hwdev);
+ return -ETIMEDOUT;
+ }
+ } else {
+ jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(done, jif) &&
+ mbox_retry_get_ack(func_to_func, done, rsp_aeq)) {
+ sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout\n");
+ dump_mox_reg(hwdev);
+ hinic_dump_aeq_info(hwdev);
+ return -ETIMEDOUT;
+ }
+
+ wb_status = get_mbox_status(send_mbox);
+ }
if (!MBOX_STATUS_SUCCESS(wb_status)) {
- dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n",
+ sdk_err(hwdev->dev_hdl, "Send mailbox segment to function %d error, wb status: 0x%x\n",
dst_func, wb_status);
errcode = MBOX_STATUS_ERRCODE(wb_status);
return errcode ? errcode : -EFAULT;
@@ -838,18 +1136,18 @@ static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
enum hinic_mod_type mod, u16 cmd, void *msg,
- u16 msg_len, u16 dst_func,
- enum hinic_hwif_direction_type direction,
- enum hinic_mbox_ack_type ack_type,
- struct mbox_msg_info *msg_info)
+ u16 msg_len, u16 dst_func,
+ enum hinic_hwif_direction_type direction,
+ enum hinic_mbox_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
{
struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ int err = 0;
+ u32 seq_id = 0;
u16 seg_len = MBOX_SEG_LEN;
- u8 *msg_seg = (u8 *)msg;
u16 left = msg_len;
- u32 seq_id = 0;
+ u8 *msg_seg = (u8 *)msg;
u64 header = 0;
- int err = 0;
down(&func_to_func->msg_send_sem);
@@ -864,7 +1162,7 @@ static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
/* The vf's offset to it's associated pf */
HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
- HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
+ HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev),
SRC_GLB_FUNC_IDX);
while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
@@ -877,11 +1175,12 @@ static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
}
err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
- seg_len, MBOX_SEND_MSG_INT, msg_info);
+ seg_len, func_to_func->send_ack_mod,
+ msg_info);
if (err) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
+ sdk_err(hwdev->dev_hdl, "Failed to send mbox seg, seq_id=0x%llx\n",
HINIC_MBOX_HEADER_GET(header, SEQID));
- goto err_send_mbox_seg;
+ goto send_err;
}
left -= HINIC_MBOX_SEG_LEN;
@@ -893,68 +1192,12 @@ static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
}
-err_send_mbox_seg:
+send_err:
up(&func_to_func->msg_send_sem);
return err;
}
-static void
-response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func,
- struct hinic_recv_mbox *recv_mbox, int err,
- u16 out_size, u16 src_func_idx)
-{
- struct mbox_msg_info msg_info = {0};
-
- if (recv_mbox->ack_type == MBOX_ACK) {
- msg_info.msg_id = recv_mbox->msg_info.msg_id;
- if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
- msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
- else if (err == HINIC_MBOX_VF_CMD_ERROR)
- msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
- else if (err)
- msg_info.status = HINIC_MBOX_PF_SEND_ERR;
-
- /* if no data needs to response, set out_size to 1 */
- if (!out_size || err)
- out_size = MBOX_MSG_NO_DATA_LEN;
-
- send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
- recv_mbox->buf_out, out_size, src_func_idx,
- HINIC_HWIF_RESPONSE, MBOX_ACK,
- &msg_info);
- }
-}
-
-static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
- struct hinic_recv_mbox *recv_mbox,
- u16 src_func_idx)
-{
- void *buf_out = recv_mbox->buf_out;
- u16 out_size = MBOX_MAX_BUF_SZ;
- int err = 0;
-
- if (HINIC_IS_VF(func_to_func->hwif)) {
- err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
- &out_size);
- } else {
- if (IS_PF_OR_PPF_SRC(src_func_idx))
- dev_warn(&func_to_func->hwif->pdev->dev,
- "Unsupported pf2pf mbox msg\n");
- else
- err = recv_pf_from_vf_mbox_handler(func_to_func,
- recv_mbox,
- src_func_idx,
- buf_out, &out_size);
- }
-
- response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size,
- src_func_idx);
- kfree(recv_mbox->buf_out);
- kfree(recv_mbox->mbox);
- kfree(recv_mbox);
-}
-
static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
enum mbox_event_state event_flag)
{
@@ -963,49 +1206,20 @@ static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
spin_unlock(&func_to_func->mbox_lock);
}
-static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func,
- struct hinic_recv_mbox *mbox_for_resp,
- enum hinic_mod_type mod, u16 cmd,
- void *buf_out, u16 *out_size)
-{
- int err;
-
- if (mbox_for_resp->msg_info.status) {
- err = mbox_for_resp->msg_info.status;
- if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
- dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n",
- mbox_for_resp->msg_info.status);
- return err;
- }
-
- if (buf_out && out_size) {
- if (*out_size < mbox_for_resp->mbox_len) {
- dev_err(&func_to_func->hwif->pdev->dev,
- "Invalid response mbox message length: %d for mod %d cmd %d, should less than: %d\n",
- mbox_for_resp->mbox_len, mod, cmd, *out_size);
- return -EFAULT;
- }
-
- if (mbox_for_resp->mbox_len)
- memcpy(buf_out, mbox_for_resp->mbox,
- mbox_for_resp->mbox_len);
-
- *out_size = mbox_for_resp->mbox_len;
- }
-
- return 0;
-}
-
int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
enum hinic_mod_type mod, u16 cmd, u16 dst_func,
void *buf_in, u16 in_size, void *buf_out,
u16 *out_size, u32 timeout)
{
+	/* use mbox_resp to hold the data responded from the other function */
struct hinic_recv_mbox *mbox_for_resp;
struct mbox_msg_info msg_info = {0};
- unsigned long timeo;
+ ulong timeo;
int err;
+ if (!func_to_func->hwdev->chip_present_flag)
+ return -EPERM;
+
mbox_for_resp = &func_to_func->mbox_resp[dst_func];
down(&func_to_func->mbox_send_sem);
@@ -1020,28 +1234,53 @@ int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
&msg_info);
if (err) {
- dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n",
- msg_info.msg_id);
+ sdk_err(func_to_func->hwdev->dev_hdl, "Send mailbox mod %d cmd 0x%x failed, msg_id: %d\n",
+ mod, cmd, msg_info.msg_id);
set_mbox_to_func_event(func_to_func, EVENT_FAIL);
- goto err_send_mbox;
+ goto send_err;
}
timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
- if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
+ if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo) &&
+ mbox_retry_get_ack(func_to_func, &mbox_for_resp->recv_done,
+ mbox_msg_ack_aeqn(func_to_func->hwdev,
+ HINIC_HWIF_DIRECT_SEND))) {
set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
- dev_err(&func_to_func->hwif->pdev->dev,
- "Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
+ sdk_err(func_to_func->hwdev->dev_hdl,
+ "Send mbox msg mod %d cmd 0x%x timeout, msg_id: %d\n",
+ mod, cmd, msg_info.msg_id);
hinic_dump_aeq_info(func_to_func->hwdev);
err = -ETIMEDOUT;
- goto err_send_mbox;
+ goto send_err;
}
set_mbox_to_func_event(func_to_func, EVENT_END);
- err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd,
- buf_out, out_size);
+ if (mbox_for_resp->msg_info.status) {
+ err = mbox_for_resp->msg_info.status;
+ if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ sdk_err(func_to_func->hwdev->dev_hdl, "Mbox response error(0x%x)\n",
+ mbox_for_resp->msg_info.status);
+ goto send_err;
+ }
+
+ if (buf_out && out_size) {
+ if (*out_size < mbox_for_resp->mbox_len) {
+ sdk_err(func_to_func->hwdev->dev_hdl,
+ "Invalid response mbox message length: %d for mod %d cmd 0x%x, should less than: %d\n",
+ mbox_for_resp->mbox_len, mod, cmd, *out_size);
+ err = -EFAULT;
+ goto send_err;
+ }
+
+ if (mbox_for_resp->mbox_len)
+ memcpy(buf_out, mbox_for_resp->mbox,
+ mbox_for_resp->mbox_len);
+
+ *out_size = mbox_for_resp->mbox_len;
+ }
-err_send_mbox:
+send_err:
up(&func_to_func->mbox_send_sem);
return err;
@@ -1050,8 +1289,11 @@ int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
void *buf_in, u16 in_size)
{
+ if (!buf_in || !in_size)
+ return -EINVAL;
+
if (in_size > HINIC_MBOX_DATA_SIZE) {
- dev_err(&func_to_func->hwif->pdev->dev,
+ sdk_err(func_to_func->hwdev->dev_hdl,
"Mbox msg len(%d) exceed limit(%d)\n",
in_size, HINIC_MBOX_DATA_SIZE);
return -EINVAL;
@@ -1060,6 +1302,47 @@ static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
return 0;
}
+int hinic_mbox_to_host(struct hinic_hwdev *hwdev, u16 dest_host_ppf_id,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err;
+
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (!HINIC_IS_PPF(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Params error, only ppf can send message to other host, func_type: %d\n",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, dest_host_ppf_id,
+ buf_in, in_size, buf_out, out_size, timeout);
+}
+
+int hinic_mbox_to_ppf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+
+ if (err)
+ return err;
+
+ if (HINIC_IS_VF(hwdev) || HINIC_IS_PPF(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, hinic_ppf_idx(hwdev),
+ buf_in, in_size, buf_out, out_size, timeout);
+}
+
int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
enum hinic_mod_type mod, u8 cmd, void *buf_in,
u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
@@ -1070,41 +1353,86 @@ int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
if (err)
return err;
- if (!HINIC_IS_VF(hwdev->hwif)) {
- dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
- HINIC_FUNC_TYPE(hwdev->hwif));
+ if (!HINIC_IS_VF(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n",
+ hinic_func_type(hwdev));
return -EINVAL;
}
- return hinic_mbox_to_func(func_to_func, mod, cmd,
- hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in,
- in_size, buf_out, out_size, timeout);
+ err = hinic_func_own_get(hwdev);
+ if (err)
+ return err;
+
+	/* port_to_port_idx - implies which PCIe interface the PF is connected to */
+ err = hinic_mbox_to_func(func_to_func, mod, cmd,
+ hinic_pf_id_of_vf_hw(hwdev), buf_in, in_size,
+ buf_out, out_size, timeout);
+ hinic_func_own_free(hwdev);
+ return err;
}
-int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
- enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+int hinic_mbox_to_func_no_ack(struct hinic_hwdev *hwdev, u16 func_idx,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size)
+{
+ struct mbox_msg_info msg_info = {0};
+ int err = mbox_func_params_valid(hwdev->func_to_func, buf_in, in_size);
+
+ if (err)
+ return err;
+
+ down(&hwdev->func_to_func->mbox_send_sem);
+
+ err = send_mbox_to_func(hwdev->func_to_func, mod, cmd, buf_in, in_size,
+ func_idx, HINIC_HWIF_DIRECT_SEND, MBOX_NO_ACK,
+ &msg_info);
+ if (err)
+ sdk_err(hwdev->dev_hdl, "Send mailbox no ack failed\n");
+
+ up(&hwdev->func_to_func->mbox_send_sem);
+
+ return err;
+}
+
+int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size)
+{
+ int err;
+
+ err = hinic_func_own_get(hwdev);
+ if (err)
+ return err;
+
+ err = hinic_mbox_to_func_no_ack(hwdev, hinic_pf_id_of_vf_hw(hwdev),
+ mod, cmd, buf_in, in_size);
+ hinic_func_own_free(hwdev);
+ return err;
+}
+
+int __hinic_mbox_to_vf(void *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
struct hinic_mbox_func_to_func *func_to_func;
- u16 dst_func_idx;
int err;
+ u16 dst_func_idx;
if (!hwdev)
return -EINVAL;
- func_to_func = hwdev->func_to_func;
+ func_to_func = ((struct hinic_hwdev *)hwdev)->func_to_func;
err = mbox_func_params_valid(func_to_func, buf_in, in_size);
if (err)
return err;
- if (HINIC_IS_VF(hwdev->hwif)) {
- dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
- HINIC_FUNC_TYPE(hwdev->hwif));
+ if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n",
+ hinic_func_type(hwdev));
return -EINVAL;
}
if (!vf_id) {
- dev_err(&hwdev->hwif->pdev->dev,
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
"VF id(%d) error!\n", vf_id);
return -EINVAL;
}
@@ -1112,12 +1440,66 @@ int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
/* vf_offset_to_pf + vf_id is the vf's global function id of vf in
* this pf
*/
- dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ dst_func_idx = hinic_glb_pf_vf_offset(hwdev) + vf_id;
return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
in_size, buf_out, out_size, timeout);
}
+int hinic_mbox_ppf_to_vf(void *hwdev, enum hinic_mod_type mod, u16 func_id,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ func_to_func = ((struct hinic_hwdev *)hwdev)->func_to_func;
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) {
+ sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, func_id, buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+EXPORT_SYMBOL(hinic_mbox_ppf_to_vf);
+
+int hinic_mbox_ppf_to_pf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u16 dst_pf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err;
+
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (!HINIC_IS_PPF(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n",
+ hinic_func_type(hwdev));
+ return -EINVAL;
+ }
+
+ if (hinic_ppf_idx(hwdev) == dst_pf_id) {
+ sdk_err(hwdev->dev_hdl,
+ "Params error, dst_pf_id(0x%x) is ppf\n", dst_pf_id);
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, dst_pf_id, buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
{
int err;
@@ -1131,14 +1513,12 @@ static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
if (!mbox_info->buf_out) {
err = -ENOMEM;
- goto err_alloc_buf_out;
+ goto alloc_buf_out_err;
}
- atomic_set(&mbox_info->msg_cnt, 0);
-
return 0;
-err_alloc_buf_out:
+alloc_buf_out_err:
kfree(mbox_info->mbox);
return err;
@@ -1150,8 +1530,7 @@ static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
kfree(mbox_info->mbox);
}
-static int alloc_mbox_info(struct hinic_hwdev *hwdev,
- struct hinic_recv_mbox *mbox_info)
+static int alloc_mbox_info(struct hinic_recv_mbox *mbox_info)
{
u16 func_idx, i;
int err;
@@ -1159,15 +1538,14 @@ static int alloc_mbox_info(struct hinic_hwdev *hwdev,
for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
err = init_mbox_info(&mbox_info[func_idx]);
if (err) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n",
- func_idx);
- goto err_init_mbox_info;
+ pr_err("Failed to init mbox info\n");
+ goto init_mbox_info_err;
}
}
return 0;
-err_init_mbox_info:
+init_mbox_info_err:
for (i = 0; i < func_idx; i++)
clean_mbox_info(&mbox_info[i]);
@@ -1186,7 +1564,7 @@ static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
{
struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
- send_mbox->data = MBOX_AREA(func_to_func->hwif);
+ send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif);
}
static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
@@ -1195,7 +1573,7 @@ static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
struct hinic_hwdev *hwdev = func_to_func->hwdev;
u32 addr_h, addr_l;
- send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev,
+ send_mbox->wb_vaddr = dma_alloc_coherent(hwdev->dev_hdl,
MBOX_WB_STATUS_LEN,
&send_mbox->wb_paddr,
GFP_KERNEL);
@@ -1225,241 +1603,107 @@ static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
0);
- dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN,
+ dma_free_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN,
send_mbox->wb_vaddr,
send_mbox->wb_paddr);
}
-bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
- struct vf_cmd_check_handle *cmd_handle,
- u16 vf_id, u8 cmd, void *buf_in,
- u16 in_size, u8 size)
-{
- u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev->hwif);
- int i;
-
- for (i = 0; i < size; i++) {
- if (cmd == cmd_handle[i].cmd) {
- if (cmd_handle[i].check_cmd)
- return cmd_handle[i].check_cmd(hwdev, src_idx,
- buf_in, in_size);
- else
- return true;
- }
- }
-
- dev_err(&hwdev->hwif->pdev->dev,
- "PF Receive VF(%d) unsupported cmd(0x%x)\n",
- vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd);
-
- return false;
-}
-
-static bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
- struct hinic_cmdq_ctxt *cmdq_ctxt)
-{
- struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
- u64 curr_pg_pfn, wq_block_pfn;
-
- if (cmdq_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif) ||
- cmdq_ctxt->cmdq_type > HINIC_MAX_CMDQ_TYPES)
- return false;
-
- curr_pg_pfn = HINIC_CMDQ_CTXT_PAGE_INFO_GET
- (ctxt_info->curr_wqe_page_pfn, CURR_WQE_PAGE_PFN);
- wq_block_pfn = HINIC_CMDQ_CTXT_BLOCK_INFO_GET
- (ctxt_info->wq_block_pfn, WQ_BLOCK_PFN);
- /* VF must use 0-level CLA */
- if (curr_pg_pfn != wq_block_pfn)
- return false;
-
- return true;
-}
-
-static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx,
- void *buf_in, u16 in_size)
-{
- if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
- return false;
-
- return hinic_cmdq_check_vf_ctxt(hwdev, buf_in);
-}
-
-#define HW_CTX_QPS_VALID(hw_ctxt) \
- ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \
- (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \
- (hw_ctxt)->sq_depth >= HINIC_QUEUE_MIN_DEPTH && \
- (hw_ctxt)->sq_depth <= HINIC_QUEUE_MAX_DEPTH && \
- (hw_ctxt)->rx_buf_sz_idx <= HINIC_MAX_RX_BUFFER_SIZE)
-
-static bool hw_ctxt_qps_param_valid(struct hinic_cmd_hw_ioctxt *hw_ctxt)
-{
- if (HW_CTX_QPS_VALID(hw_ctxt))
- return true;
-
- if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&
- !hw_ctxt->rx_buf_sz_idx)
- return true;
-
- return false;
-}
-
-static bool check_hwctxt(struct hinic_hwdev *hwdev, u16 func_idx,
- void *buf_in, u16 in_size)
+int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev)
{
- struct hinic_cmd_hw_ioctxt *hw_ctxt = buf_in;
-
- if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
- return false;
-
- if (hw_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
- return false;
+ u8 vf_in_pf;
+ int err = 0;
- if (hw_ctxt->set_cmdq_depth) {
- if (hw_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH &&
- hw_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH)
- return true;
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
- return false;
+ for (vf_in_pf = 1; vf_in_pf <= hinic_func_max_vf(hwdev); vf_in_pf++) {
+ err = set_vf_mbox_random_id(hwdev,
+ hinic_glb_pf_vf_offset(hwdev) +
+ vf_in_pf);
+ if (err)
+ break;
}
- return hw_ctxt_qps_param_valid(hw_ctxt);
-}
-
-static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
- void *buf_in, u16 in_size)
-{
- struct hinic_wq_page_size *page_size_info = buf_in;
-
- if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
- return false;
-
- if (page_size_info->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
- return false;
-
- if (((1U << page_size_info->page_size) * SZ_4K) !=
- HINIC_DEFAULT_WQ_PAGE_SIZE)
- return false;
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ hwdev->func_to_func->support_vf_random = false;
+ err = 0;
+ sdk_warn(hwdev->dev_hdl, "Mgmt unsupport set vf random id\n");
+ } else if (!err) {
+ hwdev->func_to_func->support_vf_random = true;
+ sdk_info(hwdev->dev_hdl, "PF Set vf random id success\n");
+ }
- return true;
+ return err;
}
-static struct vf_cmd_check_handle hw_cmd_support_vf[] = {
- {HINIC_COMM_CMD_START_FLR, hinic_mbox_check_func_id_8B},
- {HINIC_COMM_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B},
- {HINIC_COMM_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt},
- {HINIC_COMM_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt},
- {HINIC_COMM_CMD_HWCTXT_SET, check_hwctxt},
- {HINIC_COMM_CMD_HWCTXT_GET, check_hwctxt},
- {HINIC_COMM_CMD_SQ_HI_CI_SET, hinic_mbox_check_func_id_8B},
- {HINIC_COMM_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B},
- {HINIC_COMM_CMD_IO_RES_CLEAR, hinic_mbox_check_func_id_8B},
- {HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
- {HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
- {HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B},
- {HINIC_COMM_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B},
- {HINIC_COMM_CMD_PAGESIZE_SET, check_set_wq_page_size},
-};
-
-static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size)
+void hinic_set_mbox_seg_ack_mod(struct hinic_hwdev *hwdev,
+ enum hinic_mbox_send_mod mod)
{
- u8 size = ARRAY_SIZE(hw_cmd_support_vf);
- struct hinic_hwdev *hwdev = handle;
- struct hinic_pfhwdev *pfhwdev;
- int err = 0;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd,
- buf_in, in_size, size)) {
- dev_err(&hwdev->hwif->pdev->dev,
- "PF Receive VF: %d common cmd: 0x%x or mbox len: 0x%x is invalid\n",
- vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd,
- in_size);
- return HINIC_MBOX_VF_CMD_ERROR;
- }
-
- if (cmd == HINIC_COMM_CMD_START_FLR) {
- *out_size = 0;
- } else {
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- cmd, buf_in, in_size, buf_out, out_size,
- HINIC_MGMT_MSG_SYNC);
- if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
- dev_err(&hwdev->hwif->pdev->dev,
- "PF mbox common callback handler err: %d\n",
- err);
- }
+ if (!hwdev || !hwdev->func_to_func)
+ return;
- return err;
+ hwdev->func_to_func->send_ack_mod = mod;
}
int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
{
struct hinic_mbox_func_to_func *func_to_func;
- struct hinic_pfhwdev *pfhwdev;
+ struct card_node *chip_node;
int err;
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
if (!func_to_func)
return -ENOMEM;
hwdev->func_to_func = func_to_func;
func_to_func->hwdev = hwdev;
- func_to_func->hwif = hwdev->hwif;
+ chip_node = hwdev->chip_node;
+ func_to_func->vf_mbx_rand_id = chip_node->vf_mbx_rand_id;
+ func_to_func->vf_mbx_old_rand_id = chip_node->vf_mbx_old_rand_id;
sema_init(&func_to_func->mbox_send_sem, 1);
sema_init(&func_to_func->msg_send_sem, 1);
spin_lock_init(&func_to_func->mbox_lock);
func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
if (!func_to_func->workq) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n");
+ sdk_err(hwdev->dev_hdl, "Failed to initialize MBOX workqueue\n");
err = -ENOMEM;
- goto err_create_mbox_workq;
+ goto create_mbox_workq_err;
}
- err = alloc_mbox_info(hwdev, func_to_func->mbox_send);
+ err = alloc_mbox_info(func_to_func->mbox_send);
if (err) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n");
- goto err_alloc_mbox_for_send;
+ sdk_err(hwdev->dev_hdl, "Alloc mem for mbox_active fail\n");
+ goto alloc_mbox_for_send_err;
}
- err = alloc_mbox_info(hwdev, func_to_func->mbox_resp);
+ err = alloc_mbox_info(func_to_func->mbox_resp);
if (err) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n");
- goto err_alloc_mbox_for_resp;
+ sdk_err(hwdev->dev_hdl, "Alloc mem for mbox_passive fail\n");
+ goto alloc_mbox_for_resp_err;
}
err = alloc_mbox_wb_status(func_to_func);
if (err) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n");
- goto err_alloc_wb_status;
+ sdk_err(hwdev->dev_hdl, "Failed to alloc mbox write back status\n");
+ goto alloc_wb_status_err;
}
prepare_send_mbox(func_to_func);
- hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC,
- &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler);
- hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT,
- &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler);
-
- if (!HINIC_IS_VF(hwdev->hwif))
- hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
- comm_pf_mbox_handler);
+ func_to_func->send_ack_mod = HINIC_MBOX_SEND_MSG_POLL;
return 0;
-err_alloc_wb_status:
+alloc_wb_status_err:
free_mbox_info(func_to_func->mbox_resp);
-err_alloc_mbox_for_resp:
+alloc_mbox_for_resp_err:
free_mbox_info(func_to_func->mbox_send);
-err_alloc_mbox_for_send:
+alloc_mbox_for_send_err:
destroy_workqueue(func_to_func->workq);
-err_create_mbox_workq:
+create_mbox_workq_err:
kfree(func_to_func);
return err;
@@ -1469,10 +1713,6 @@ void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
{
struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
- hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC);
- hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT);
-
- hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
/* destroy workqueue before free related mbox resources in case of
* illegal resource access
*/
@@ -1484,32 +1724,3 @@ void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
kfree(func_to_func);
}
-
-int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev)
-{
- u16 vf_offset;
- u8 vf_in_pf;
- int err = 0;
-
- if (HINIC_IS_VF(hwdev->hwif))
- return 0;
-
- vf_offset = hinic_glb_pf_vf_offset(hwdev->hwif);
-
- for (vf_in_pf = 1; vf_in_pf <= hwdev->nic_cap.max_vf; vf_in_pf++) {
- err = set_vf_mbox_random_id(hwdev, vf_offset + vf_in_pf);
- if (err)
- break;
- }
-
- if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
- hwdev->func_to_func->support_vf_random = false;
- err = 0;
- dev_warn(&hwdev->hwif->pdev->dev, "Mgmt is unsupported to set VF%d random id\n",
- vf_in_pf - 1);
- } else if (!err) {
- hwdev->func_to_func->support_vf_random = true;
- }
-
- return err;
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_mbox.h
new file mode 100644
index 000000000000..a03a5b8113f2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: GPL-2.0*/
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_MBOX_H_
+#define HINIC_MBOX_H_
+
+#define HINIC_MBOX_PF_SEND_ERR 0x1
+#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2
+#define HINIC_MBOX_VF_CMD_ERROR 0x3
+
+#define HINIC_MAX_FUNCTIONS 512
+#define HINIC_MAX_PF_FUNCS 16
+
+#define HINIC_MBOX_WQ_NAME "hinic_mbox"
+
+enum hinic_mbox_seg_errcode {
+ MBOX_ERRCODE_NO_ERRORS = 0,
+ /* VF send the mailbox data to the wrong destination functions */
+ MBOX_ERRCODE_VF_TO_WRONG_FUNC = 0x100,
+ /* PPF send the mailbox data to the wrong destination functions */
+ MBOX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200,
+ /* PF send the mailbox data to the wrong destination functions */
+ MBOX_ERRCODE_PF_TO_WRONG_FUNC = 0x300,
+ /* The mailbox data size is set to all zero */
+ MBOX_ERRCODE_ZERO_DATA_SIZE = 0x400,
+ /* The sender function attribute has not been learned by CPI hardware */
+ MBOX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500,
+ /* The receiver function attr has not been learned by CPI hardware */
+ MBOX_ERRCODE_UNKNOWN_DES_FUNC = 0x600,
+};
+
+enum hinic_mbox_ack_type {
+ MBOX_ACK,
+ MBOX_NO_ACK,
+};
+
+struct mbox_msg_info {
+ u8 msg_id;
+ u8 status; /*can only use 6 bit*/
+};
+
+struct hinic_recv_mbox {
+ struct completion recv_done;
+ void *mbox;
+ u8 cmd;
+ enum hinic_mod_type mod;
+ u16 mbox_len;
+ void *buf_out;
+ enum hinic_mbox_ack_type ack_type;
+ struct mbox_msg_info msg_info;
+ u8 seq_id;
+};
+
+struct hinic_send_mbox {
+ struct completion send_done;
+ u8 *data;
+
+ u64 *wb_status; /* write back status */
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+typedef int (*hinic_vf_mbox_cb)(void *handle, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+typedef int (*hinic_pf_mbox_cb)(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+typedef int (*hinic_ppf_mbox_cb)(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+typedef int (*hinic_pf_recv_from_ppf_mbox_cb)(void *handle, u8 cmd,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+enum mbox_event_state {
+ EVENT_START = 0,
+ EVENT_FAIL,
+ EVENT_TIMEOUT,
+ EVENT_END,
+};
+
+enum hinic_mbox_cb_state {
+ HINIC_VF_MBOX_CB_REG = 0,
+ HINIC_VF_MBOX_CB_RUNNING,
+ HINIC_PF_MBOX_CB_REG,
+ HINIC_PF_MBOX_CB_RUNNING,
+ HINIC_PPF_MBOX_CB_REG,
+ HINIC_PPF_MBOX_CB_RUNNING,
+ HINIC_PPF_TO_PF_MBOX_CB_REG,
+ HINIC_PPF_TO_PF_MBOX_CB_RUNNIG,
+};
+
+enum hinic_mbox_send_mod {
+ HINIC_MBOX_SEND_MSG_INT,
+ HINIC_MBOX_SEND_MSG_POLL,
+};
+
+struct hinic_mbox_func_to_func {
+ struct hinic_hwdev *hwdev;
+
+ struct semaphore mbox_send_sem;
+ struct semaphore msg_send_sem;
+ struct hinic_send_mbox send_mbox;
+
+ struct workqueue_struct *workq;
+
+ struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS];
+ struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS];
+
+ hinic_vf_mbox_cb vf_mbox_cb[HINIC_MOD_MAX];
+ hinic_pf_mbox_cb pf_mbox_cb[HINIC_MOD_MAX];
+ hinic_ppf_mbox_cb ppf_mbox_cb[HINIC_MOD_MAX];
+ hinic_pf_recv_from_ppf_mbox_cb pf_recv_from_ppf_mbox_cb[HINIC_MOD_MAX];
+ unsigned long ppf_to_pf_mbox_cb_state[HINIC_MOD_MAX];
+ unsigned long ppf_mbox_cb_state[HINIC_MOD_MAX];
+ unsigned long pf_mbox_cb_state[HINIC_MOD_MAX];
+ unsigned long vf_mbox_cb_state[HINIC_MOD_MAX];
+
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+ /* lock for mbox event flag */
+ spinlock_t mbox_lock;
+
+ u32 *vf_mbx_old_rand_id;
+ u32 *vf_mbx_rand_id;
+ bool support_vf_random;
+ enum hinic_mbox_send_mod send_ack_mod;
+};
+
+struct hinic_mbox_work {
+ struct work_struct work;
+ u16 src_func_idx;
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_recv_mbox *recv_mbox;
+};
+
+struct vf_cmd_check_handle {
+ u8 cmd;
+ bool (*check_cmd)(struct hinic_hwdev *hwdev, u16 src_func_idx,
+ void *buf_in, u16 in_size);
+};
+
+int hinic_register_ppf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_ppf_mbox_cb callback);
+
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback);
+
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback);
+
+int hinic_register_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_recv_from_ppf_mbox_cb callback);
+
+void hinic_unregister_ppf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_unregister_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size);
+
+void hinic_mbox_self_aeqe_handler(void *handle, u8 *header, u8 size);
+
+int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev);
+
+bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size);
+
+bool hinic_mbox_check_func_id_10B(struct hinic_hwdev *hwdev, u16 func_idx,
+ void *buf_in, u16 in_size);
+
+bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
+ struct vf_cmd_check_handle *cmd_handle,
+ u16 vf_id, u8 cmd, void *buf_in, u16 in_size,
+ u8 size);
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_mbox_to_host(struct hinic_hwdev *hwdev, u16 dest_host_ppf_id,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_ppf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_func_no_ack(struct hinic_hwdev *hwdev, u16 func_idx,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size);
+
+int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size);
+
+int hinic_mbox_ppf_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u16 dst_pf_id, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int __hinic_mbox_to_vf(void *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
+
+int vf_to_pf_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+void hinic_set_mbox_seg_ack_mod(struct hinic_hwdev *hwdev,
+ enum hinic_mbox_send_mod mod);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c
new file mode 100644
index 000000000000..aaa20ecf8edf
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c
@@ -0,0 +1,1446 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+
+#include "hinic_hwif.h"
+#include "hinic_api_cmd.h"
+#include "hinic_mgmt.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_eqs.h"
+
+#define BUF_OUT_DEFAULT_SIZE 1
+#define SEGMENT_LEN 48
+
+#define MGMT_MSG_MAX_SEQ_ID (ALIGN(HINIC_MSG_TO_MGMT_MAX_LEN, \
+ SEGMENT_LEN) / SEGMENT_LEN)
+
+#define MAX_PF_MGMT_BUF_SIZE 2048UL
+#define MGMT_MSG_LAST_SEG_MAX_LEN (MAX_PF_MGMT_BUF_SIZE - \
+ SEGMENT_LEN * MGMT_MSG_MAX_SEQ_ID)
+
+#define MGMT_MSG_SIZE_MIN 20
+#define MGMT_MSG_SIZE_STEP 16
+#define MGMT_MSG_RSVD_FOR_DEV 8
+
+#define MGMT_MSG_TIMEOUT 5000 /* millisecond */
+
+#define SYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_FLAG 0x200
+
+#define MSG_NO_RESP 0xFFFF
+
+#define MAX_MSG_SZ 2016
+
+#define MAX_CMD_BUF_SIZE 2048ULL
+
+#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ)
+
+#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
+
+#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
+ (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK)
+
+#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id)
+
+#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \
+ ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \
+ | ASYNC_MSG_FLAG)
+
+static void pf_to_mgmt_send_event_set(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ int event_flag)
+{
+ spin_lock_bh(&pf_to_mgmt->sync_event_lock);
+ pf_to_mgmt->event_flag = event_flag;
+ spin_unlock_bh(&pf_to_mgmt->sync_event_lock);
+}
+
+/**
+ * hinic_register_mgmt_msg_cb - register sync msg handler for a module
+ * @hwdev: the pointer to hw device
+ * @mod: module in the chip that this handler will handle its sync messages
+ * @pri_handle: pri handle function
+ * @callback: the handler for a sync message that will handle messages
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod,
+ void *pri_handle, hinic_mgmt_msg_cb callback)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+
+ if (mod >= HINIC_MOD_HW_MAX || !hwdev)
+ return -EFAULT;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return -EINVAL;
+
+ pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback;
+ pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle;
+
+ set_bit(HINIC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]);
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_register_mgmt_msg_cb);
+
+/**
+ * hinic_unregister_mgmt_msg_cb - unregister sync msg handler for a module
+ * @hwdev: the pointer to hw device
+ * @mod: module in the chip that this handler will handle its sync messages
+ */
+void hinic_unregister_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+
+ if (!hwdev || mod >= HINIC_MOD_HW_MAX)
+ return;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return;
+
+ clear_bit(HINIC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]);
+
+ while (test_bit(HINIC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL;
+ pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL;
+}
+EXPORT_SYMBOL(hinic_unregister_mgmt_msg_cb);
+
+void hinic_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd,
+ comm_up_self_msg_proc proc)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ u8 cmd_idx;
+
+ if (!hwdev || !proc)
+ return;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return;
+
+ cmd_idx = pf_to_mgmt->proc.cmd_num;
+ if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+ "Register recv up process failed(cmd=0x%x)\r\n", cmd);
+ return;
+ }
+
+ pf_to_mgmt->proc.info[cmd_idx].cmd = cmd;
+ pf_to_mgmt->proc.info[cmd_idx].proc = proc;
+
+ pf_to_mgmt->proc.cmd_num++;
+}
+
+void hinic_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ u8 cmd_idx;
+
+ if (!hwdev)
+ return;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ if (!pf_to_mgmt)
+ return;
+
+ cmd_idx = pf_to_mgmt->proc.cmd_num;
+ if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+ "Unregister recv up process failed(cmd=0x%x)\r\n", cmd);
+ return;
+ }
+
+ for (cmd_idx = 0; cmd_idx < HINIC_COMM_SELF_CMD_MAX; cmd_idx++) {
+ if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) {
+ pf_to_mgmt->proc.info[cmd_idx].cmd = 0;
+ pf_to_mgmt->proc.info[cmd_idx].proc = NULL;
+ pf_to_mgmt->proc.cmd_num--;
+ }
+ }
+}
+
+/**
+ * mgmt_msg_len - calculate the total message length
+ * @msg_data_len: the length of the message data
+ * Return: the total message length
+ */
+static u16 mgmt_msg_len(u16 msg_data_len)
+{
+ /* u64 - the size of the header */
+ u16 msg_size;
+
+ msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len);
+
+ if (msg_size > MGMT_MSG_SIZE_MIN)
+ msg_size = MGMT_MSG_SIZE_MIN +
+ ALIGN((msg_size - MGMT_MSG_SIZE_MIN),
+ MGMT_MSG_SIZE_STEP);
+ else
+ msg_size = MGMT_MSG_SIZE_MIN;
+
+ return msg_size;
+}
+
+/**
+ * prepare_header - prepare the header of the message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: pointer of the header to prepare
+ * @msg_len: the length of the message
+ * @mod: module in the chip that will get the message
+ * @ack_type: message ack type
+ * @direction: the direction of the original message
+ * @cmd: cmd type
+ * @msg_id: message id
+ */
+static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ u64 *header, u16 msg_len, enum hinic_mod_type mod,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ enum hinic_mgmt_cmd cmd, u32 msg_id)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif;
+
+ *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MSG_HEADER_SET(mod, MODULE) |
+ HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
+ HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+ HINIC_MSG_HEADER_SET(0, SEQID) |
+ HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ HINIC_MSG_HEADER_SET(direction, DIRECTION) |
+ HINIC_MSG_HEADER_SET(cmd, CMD) |
+ HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
+ HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
+ HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
+static void clp_prepare_header(struct hinic_hwdev *hwdev,
+ u64 *header, u16 msg_len,
+ enum hinic_mod_type mod,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ enum hinic_mgmt_cmd cmd, u32 msg_id)
+{
+ struct hinic_hwif *hwif = hwdev->hwif;
+
+ *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MSG_HEADER_SET(mod, MODULE) |
+ HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
+ HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+ HINIC_MSG_HEADER_SET(0, SEQID) |
+ HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ HINIC_MSG_HEADER_SET(direction, DIRECTION) |
+ HINIC_MSG_HEADER_SET(cmd, CMD) |
+ HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
+ HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
+ HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
+/**
+ * prepare_mgmt_cmd - prepare the mgmt command
+ * @mgmt_cmd: pointer to the command to prepare
+ * @header: pointer of the header to prepare
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ */
+static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg,
+ int msg_len)
+{
+ memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
+
+ mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
+ memcpy(mgmt_cmd, header, sizeof(*header));
+
+ mgmt_cmd += sizeof(*header);
+ memcpy(mgmt_cmd, msg, msg_len);
+}
+
+/**
+ * send_msg_to_mgmt_async - send async message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to response for
+ * Return: 0 - success, negative - failure
+ */
+static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hinic_msg_direction_type direction,
+ u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->async_msg_buf;
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (!hinic_get_chip_present_flag(pf_to_mgmt->hwdev))
+ return -EFAULT;
+
+ if (cmd_size > MAX_MSG_SZ)
+ return -EINVAL;
+
+ if (direction == HINIC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+ direction, cmd, ASYNC_MSG_ID(pf_to_mgmt));
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];
+
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
+ cmd_size);
+}
+
+int hinic_pf_to_mgmt_async(void *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl;
+ int err;
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+
+ /* Lock the async_msg_buf */
+ spin_lock_bh(&pf_to_mgmt->async_msg_lock);
+ ASYNC_MSG_ID_INC(pf_to_mgmt);
+
+ err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_DIRECT_SEND, MSG_NO_RESP);
+ spin_unlock_bh(&pf_to_mgmt->async_msg_lock);
+
+ if (err) {
+ sdk_err(dev, "Failed to send async mgmt msg\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * send_msg_to_mgmt_sync - send async message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the msg data
+ * @msg_len: the msg data length
+ * @ack_type: message ack type
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to response for
+ * Return: 0 - success, negative - failure
+ */
+static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (!hinic_get_chip_present_flag(pf_to_mgmt->hwdev))
+ return -EFAULT;
+
+ if (cmd_size > MAX_MSG_SZ)
+ return -EINVAL;
+
+ if (direction == HINIC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt));
+
+ if (ack_type == HINIC_MSG_ACK)
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START);
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU];
+
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
+ cmd_size);
+}
+
+int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl;
+ struct hinic_recv_msg *recv_msg;
+ struct hinic_msg_head *msg_head;
+ struct completion *recv_done;
+ ulong timeo;
+ int err;
+ ulong ret;
+
+ /* set aeq fix num to 3, need to ensure response aeq id < 3*/
+ if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC) {
+ msg_head = buf_in;
+
+ if (msg_head->resp_aeq_num >= HINIC_MAX_AEQS)
+ msg_head->resp_aeq_num = 0;
+ }
+
+ pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+
+ /* Lock the sync_msg_buf */
+ down(&pf_to_mgmt->sync_msg_lock);
+ recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
+ recv_done = &recv_msg->recv_done;
+
+ init_completion(recv_done);
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+ if (err) {
+ sdk_err(dev, "Failed to send sync msg mod %d cmd 0x%x to mgmt, sync_msg_id: %d\n",
+ mod, cmd, pf_to_mgmt->sync_msg_id);
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL);
+ goto unlock_sync_msg;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+
+ ret = wait_for_completion_timeout(recv_done, timeo);
+ if (!ret) {
+ sdk_err(dev, "Mgmt response sync msg mod %d cmd 0x%x timeout, sync_msg_id: %d\n",
+ mod, cmd, pf_to_mgmt->sync_msg_id);
+ hinic_dump_aeq_info((struct hinic_hwdev *)hwdev);
+ err = -ETIMEDOUT;
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT);
+ goto unlock_sync_msg;
+ }
+ pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END);
+
+ if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag)) {
+ up(&pf_to_mgmt->sync_msg_lock);
+ return -ETIMEDOUT;
+ }
+
+ if (buf_out && out_size) {
+ if (*out_size < recv_msg->msg_len) {
+ sdk_err(dev, "Invalid response message length: %d for mod %d cmd 0x%x from mgmt, should less than: %d\n",
+ recv_msg->msg_len, mod, cmd, *out_size);
+ err = -EFAULT;
+ goto unlock_sync_msg;
+ }
+
+ if (recv_msg->msg_len)
+ memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
+
+ *out_size = recv_msg->msg_len;
+ }
+
+unlock_sync_msg:
+ up(&pf_to_mgmt->sync_msg_lock);
+
+ return err;
+}
+
+static int __get_clp_reg(void *hwdev, enum clp_data_type data_type,
+ enum clp_reg_type reg_type, u32 *reg_addr)
+{
+ struct hinic_hwdev *dev = hwdev;
+ u32 offset;
+
+ offset = HINIC_CLP_REG_GAP * hinic_pcie_itf_id(dev);
+
+ switch (reg_type) {
+ case HINIC_CLP_BA_HOST:
+ *reg_addr = (data_type == HINIC_CLP_REQ_HOST) ?
+ HINIC_CLP_REG(REQ_SRAM_BA) :
+ HINIC_CLP_REG(RSP_SRAM_BA);
+ break;
+
+ case HINIC_CLP_SIZE_HOST:
+ *reg_addr = HINIC_CLP_REG(SRAM_SIZE);
+ break;
+
+ case HINIC_CLP_LEN_HOST:
+ *reg_addr = (data_type == HINIC_CLP_REQ_HOST) ?
+ HINIC_CLP_REG(REQ) : HINIC_CLP_REG(RSP);
+ break;
+
+ case HINIC_CLP_START_REQ_HOST:
+ *reg_addr = HINIC_CLP_REG(REQ);
+ break;
+
+ case HINIC_CLP_READY_RSP_HOST:
+ *reg_addr = HINIC_CLP_REG(RSP);
+ break;
+
+ default:
+ *reg_addr = 0;
+ break;
+ }
+ if (*reg_addr == 0)
+ return -EINVAL;
+
+ *reg_addr += offset;
+
+ return 0;
+}
+
+static int hinic_read_clp_reg(struct hinic_hwdev *hwdev,
+ enum clp_data_type data_type,
+ enum clp_reg_type reg_type, u32 *read_value)
+{
+ int err;
+ u32 reg_addr, reg_value;
+
+ if (data_type == HINIC_CLP_REQ_HOST &&
+ reg_type == HINIC_CLP_READY_RSP_HOST)
+ return -EINVAL;
+ if (data_type == HINIC_CLP_RSP_HOST &&
+ reg_type == HINIC_CLP_START_REQ_HOST)
+ return -EINVAL;
+
+ err = __get_clp_reg(hwdev, data_type, reg_type, ®_addr);
+ if (err)
+ return err;
+
+ reg_value = hinic_hwif_read_reg(hwdev->hwif, reg_addr);
+
+ switch (reg_type) {
+ case HINIC_CLP_BA_HOST:
+ reg_value = ((reg_value >>
+ HINIC_CLP_OFFSET(SRAM_BASE)) &
+ HINIC_CLP_MASK(SRAM_BASE));
+ break;
+
+ case HINIC_CLP_SIZE_HOST:
+ reg_value = ((reg_value >>
+ HINIC_CLP_OFFSET(SRAM_SIZE)) &
+ HINIC_CLP_MASK(SRAM_SIZE));
+ break;
+
+ case HINIC_CLP_LEN_HOST:
+ reg_value = ((reg_value >> HINIC_CLP_OFFSET(LEN)) &
+ HINIC_CLP_MASK(LEN));
+ break;
+
+ case HINIC_CLP_START_REQ_HOST:
+ reg_value = ((reg_value >> HINIC_CLP_OFFSET(START)) &
+ HINIC_CLP_MASK(START));
+ break;
+
+ case HINIC_CLP_READY_RSP_HOST:
+ reg_value = ((reg_value >> HINIC_CLP_OFFSET(READY)) &
+ HINIC_CLP_MASK(READY));
+ break;
+
+ default:
+ break;
+ }
+
+ *read_value = reg_value;
+ return 0;
+}
+
+static int __check_reg_value(enum clp_reg_type reg_type, u32 value)
+{
+ if (reg_type == HINIC_CLP_BA_HOST &&
+ value > HINIC_CLP_SRAM_BASE_REG_MAX)
+ return -EINVAL;
+
+ if (reg_type == HINIC_CLP_SIZE_HOST &&
+ value > HINIC_CLP_SRAM_SIZE_REG_MAX)
+ return -EINVAL;
+
+ if (reg_type == HINIC_CLP_LEN_HOST &&
+ value > HINIC_CLP_LEN_REG_MAX)
+ return -EINVAL;
+
+ if (reg_type == HINIC_CLP_START_REQ_HOST &&
+ value > HINIC_CLP_START_OR_READY_REG_MAX)
+ return -EINVAL;
+
+ if (reg_type == HINIC_CLP_READY_RSP_HOST &&
+ value > HINIC_CLP_START_OR_READY_REG_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hinic_write_clp_reg(struct hinic_hwdev *hwdev,
+ enum clp_data_type data_type,
+ enum clp_reg_type reg_type, u32 value)
+{
+ u32 reg_addr, reg_value;
+
+ if (data_type == HINIC_CLP_REQ_HOST &&
+ reg_type == HINIC_CLP_READY_RSP_HOST)
+ return;
+ if (data_type == HINIC_CLP_RSP_HOST &&
+ reg_type == HINIC_CLP_START_REQ_HOST)
+ return;
+
+ if (__check_reg_value(reg_type, value))
+ return;
+
+ if (__get_clp_reg(hwdev, data_type, reg_type, ®_addr))
+ return;
+
+ reg_value = hinic_hwif_read_reg(hwdev->hwif, reg_addr);
+
+ switch (reg_type) {
+ case HINIC_CLP_LEN_HOST:
+ reg_value = reg_value &
+ (~(HINIC_CLP_MASK(LEN) << HINIC_CLP_OFFSET(LEN)));
+ reg_value = reg_value | (value << HINIC_CLP_OFFSET(LEN));
+ break;
+
+ case HINIC_CLP_START_REQ_HOST:
+ reg_value = reg_value &
+ (~(HINIC_CLP_MASK(START) <<
+ HINIC_CLP_OFFSET(START)));
+ reg_value = reg_value | (value << HINIC_CLP_OFFSET(START));
+ break;
+
+ case HINIC_CLP_READY_RSP_HOST:
+ reg_value = reg_value &
+ (~(HINIC_CLP_MASK(READY) <<
+ HINIC_CLP_OFFSET(READY)));
+ reg_value = reg_value | (value << HINIC_CLP_OFFSET(READY));
+ break;
+
+ default:
+ return;
+ }
+
+ hinic_hwif_write_reg(hwdev->hwif, reg_addr, reg_value);
+}
+
+static int hinic_read_clp_data(struct hinic_hwdev *hwdev,
+ void *buf_out, u16 *out_size)
+{
+ int err;
+ u32 reg = HINIC_CLP_DATA(RSP);
+ u32 ready, delay_cnt;
+ u32 *ptr = (u32 *)buf_out;
+ u32 temp_out_size = 0;
+
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_READY_RSP_HOST, &ready);
+ if (err)
+ return err;
+
+ delay_cnt = 0;
+ while (ready == 0) {
+ usleep_range(9000, 10000);
+ delay_cnt++;
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_READY_RSP_HOST, &ready);
+ if (err || delay_cnt > HINIC_CLP_DELAY_CNT_MAX) {
+ sdk_err(hwdev->dev_hdl, "Timeout with delay_cnt: %d\n",
+ delay_cnt);
+ return -EINVAL;
+ }
+ }
+
+ err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_LEN_HOST, &temp_out_size);
+ if (err)
+ return err;
+
+ if (temp_out_size > HINIC_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) {
+ sdk_err(hwdev->dev_hdl, "Invalid temp_out_size: %d\n",
+ temp_out_size);
+ return -EINVAL;
+ }
+
+ *out_size = (u16)(temp_out_size & 0xffff);
+ for (; temp_out_size > 0; temp_out_size--) {
+ *ptr = hinic_hwif_read_reg(hwdev->hwif, reg);
+ ptr++;
+ reg = reg + 4;
+ }
+
+ hinic_write_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_READY_RSP_HOST, (u32)0x0);
+ hinic_write_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+ HINIC_CLP_LEN_HOST, (u32)0x0);
+
+ return 0;
+}
+
+/**
+ * hinic_write_clp_data - copy a request into the CLP request data window
+ * @hwdev: the pointer to hw device
+ * @buf_in: request buffer, written as 32-bit words
+ * @in_size: request length, in 32-bit units
+ * Return: 0 - success, negative - failure
+ */
+static int hinic_write_clp_data(struct hinic_hwdev *hwdev,
+				void *buf_in, u16 in_size)
+{
+	int err;
+	u32 reg = HINIC_CLP_DATA(REQ);
+	u32 start = 1;
+	u32 delay_cnt = 0;
+	u32 *ptr = (u32 *)buf_in;
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+				 HINIC_CLP_START_REQ_HOST, &start);
+	if (err)
+		return err;
+
+	/* wait for the previous request's START bit to drop (~10ms/try) */
+	while (start == 1) {
+		usleep_range(9000, 10000);
+		delay_cnt++;
+		err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+					 HINIC_CLP_START_REQ_HOST, &start);
+		if (err || delay_cnt > HINIC_CLP_DELAY_CNT_MAX)
+			return -EINVAL;
+	}
+
+	/* NOTE(review): LEN and START are raised before the payload words
+	 * are written below - confirm the firmware only samples the data
+	 * window after this START handshake completes
+	 */
+	hinic_write_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+			    HINIC_CLP_LEN_HOST, in_size);
+	hinic_write_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+			    HINIC_CLP_START_REQ_HOST, (u32)0x1);
+
+	for (; in_size > 0; in_size--) {
+		hinic_hwif_write_reg(hwdev->hwif, reg, *ptr);
+		ptr++;
+		reg = reg + 4;
+	}
+
+	return 0;
+}
+
+/**
+ * hinic_check_clp_init_status - verify the firmware initialized the CLP
+ * channel: both request and response base-address and size registers must
+ * hold non-zero values before any CLP exchange is attempted
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ */
+static int hinic_check_clp_init_status(struct hinic_hwdev *hwdev)
+{
+	int err;
+	u32 reg_value = 0;
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+				 HINIC_CLP_BA_HOST, &reg_value);
+	if (err || !reg_value) {
+		sdk_err(hwdev->dev_hdl, "Wrong req ba value: 0x%x\n",
+			reg_value);
+		return -EINVAL;
+	}
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+				 HINIC_CLP_BA_HOST, &reg_value);
+	if (err || !reg_value) {
+		sdk_err(hwdev->dev_hdl, "Wrong rsp ba value: 0x%x\n",
+			reg_value);
+		return -EINVAL;
+	}
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST,
+				 HINIC_CLP_SIZE_HOST, &reg_value);
+	if (err || !reg_value) {
+		sdk_err(hwdev->dev_hdl, "Wrong req size\n");
+		return -EINVAL;
+	}
+
+	err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST,
+				 HINIC_CLP_SIZE_HOST, &reg_value);
+	if (err || !reg_value) {
+		sdk_err(hwdev->dev_hdl, "Wrong rsp size\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Zero the selected CLP data window (request or response), one 32-bit
+ * register word at a time.
+ */
+static void hinic_clear_clp_data(struct hinic_hwdev *hwdev,
+				 enum clp_data_type data_type)
+{
+	u32 word_cnt = HINIC_CLP_INPUT_BUFFER_LEN_HOST /
+		       HINIC_CLP_DATA_UNIT_HOST;
+	u32 base;
+	u32 i;
+
+	base = (data_type == HINIC_CLP_REQ_HOST) ?
+		HINIC_CLP_DATA(REQ) : HINIC_CLP_DATA(RSP);
+
+	for (i = 0; i < word_cnt; i++)
+		hinic_hwif_write_reg(hwdev->hwif, base + i * 4, 0x0);
+}
+
+/**
+ * hinic_pf_clp_to_mgmt - send a management command over the CLP register
+ * channel and wait (poll mode) for its response
+ * @hwdev: the pointer to hw device
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @buf_in: request payload
+ * @in_size: request payload length in bytes
+ * @out_size: expected response payload length; the call fails if the
+ *            firmware response length differs
+ * @buf_out: buffer the response payload is copied into
+ * Return: 0 - success, negative - failure
+ */
+int hinic_pf_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+			 const void *buf_in, u16 in_size,
+			 void *buf_out, u16 *out_size)
+{
+	struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt;
+	struct hinic_hwdev *dev = hwdev;
+	u64 header;
+	u16 real_size;
+	u8 *clp_msg_buf;
+	int err;
+
+	clp_pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->clp_pf_to_mgmt;
+	clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+
+	/* 4 bytes alignment */
+	if (in_size % HINIC_CLP_DATA_UNIT_HOST)
+		real_size = (in_size + (u16)sizeof(header)
+			     + HINIC_CLP_DATA_UNIT_HOST);
+	else
+		real_size = in_size + (u16)sizeof(header);
+	/* real_size is in 32-bit units from here on */
+	real_size = real_size / HINIC_CLP_DATA_UNIT_HOST;
+
+	if (real_size >
+	    (HINIC_CLP_INPUT_BUFFER_LEN_HOST / HINIC_CLP_DATA_UNIT_HOST)) {
+		sdk_err(dev->dev_hdl, "Invalid real_size: %d\n", real_size);
+		return -EINVAL;
+	}
+	/* one outstanding CLP exchange at a time */
+	down(&clp_pf_to_mgmt->clp_msg_lock);
+
+	err = hinic_check_clp_init_status(dev);
+	if (err) {
+		sdk_err(dev->dev_hdl, "Check clp init status failed\n");
+		up(&clp_pf_to_mgmt->clp_msg_lock);
+		return err;
+	}
+
+	/* make sure no stale response can be mistaken for ours */
+	hinic_clear_clp_data(dev, HINIC_CLP_RSP_HOST);
+	hinic_write_clp_reg(dev, HINIC_CLP_RSP_HOST,
+			    HINIC_CLP_READY_RSP_HOST, 0x0);
+
+	/* Send request */
+	memset(clp_msg_buf, 0x0, HINIC_CLP_INPUT_BUFFER_LEN_HOST);
+	clp_prepare_header(dev, &header, in_size, mod, 0, 0, cmd, 0);
+
+	memcpy(clp_msg_buf, &header, sizeof(header));
+	clp_msg_buf += sizeof(header);
+	memcpy(clp_msg_buf, buf_in, in_size);
+
+	clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+
+	hinic_clear_clp_data(dev, HINIC_CLP_REQ_HOST);
+	err = hinic_write_clp_data(hwdev,
+				   clp_pf_to_mgmt->clp_msg_buf, real_size);
+	if (err) {
+		sdk_err(dev->dev_hdl, "Send clp request failed\n");
+		up(&clp_pf_to_mgmt->clp_msg_lock);
+		return -EINVAL;
+	}
+
+	/* Get response */
+	clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf;
+	memset(clp_msg_buf, 0x0, HINIC_CLP_INPUT_BUFFER_LEN_HOST);
+	err = hinic_read_clp_data(hwdev, clp_msg_buf, &real_size);
+	hinic_clear_clp_data(dev, HINIC_CLP_RSP_HOST);
+	if (err) {
+		sdk_err(dev->dev_hdl, "Read clp response failed\n");
+		up(&clp_pf_to_mgmt->clp_msg_lock);
+		return -EINVAL;
+	}
+
+	/* convert the response length back from 32-bit units to bytes */
+	real_size = (u16)((real_size * HINIC_CLP_DATA_UNIT_HOST) & 0xffff);
+	if (real_size <= sizeof(header) ||
+	    real_size > HINIC_CLP_INPUT_BUFFER_LEN_HOST) {
+		sdk_err(dev->dev_hdl, "Invalid response size: %d", real_size);
+		up(&clp_pf_to_mgmt->clp_msg_lock);
+		return -EINVAL;
+	}
+	real_size = real_size - sizeof(header);
+	if (real_size != *out_size) {
+		sdk_err(dev->dev_hdl, "Invalid real_size: %d, out_size: %d\n",
+			real_size, *out_size);
+		up(&clp_pf_to_mgmt->clp_msg_lock);
+		return -EINVAL;
+	}
+
+	memcpy(buf_out, (clp_msg_buf + sizeof(header)), real_size);
+	up(&clp_pf_to_mgmt->clp_msg_lock);
+
+	return 0;
+}
+
+int hinic_msg_to_mgmt_poll_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+				void *buf_in, u16 in_size, void *buf_out,
+				u16 *out_size, u32 timeout)
+{
+	/* NOTE(review): stub - always reports success without touching any
+	 * of its arguments; presumably kept only for interface
+	 * compatibility. Confirm no caller relies on an actual poll-mode
+	 * transfer happening here.
+	 */
+	return 0;
+}
+
+/* This function is only used by txrx flush */
+int hinic_pf_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+			    void *buf_in, u16 in_size)
+{
+	struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+	void *dev = hw_dev->dev_hdl;
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+	int err;
+
+	if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) {
+		sdk_err(dev, "Mgmt module not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!MSG_SZ_IS_VALID(in_size)) {
+		sdk_err(dev, "Mgmt msg buffer size: %d is not valid\n",
+			in_size);
+		return -EINVAL;
+	}
+
+	if (!hw_dev->chip_present_flag)
+		return -EPERM;
+
+	pf_to_mgmt = hw_dev->pf_to_mgmt;
+
+	/* serialize access to the shared sync message buffer */
+	down(&pf_to_mgmt->sync_msg_lock);
+	err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+				    HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND,
+				    MSG_NO_RESP);
+	up(&pf_to_mgmt->sync_msg_lock);
+
+	return err;
+}
+
+/**
+ * api cmd write or read bypass: default is poll mode; to use the aeq
+ * interrupt instead, set wb_trigger_aeqe to 1
+ */
+int hinic_api_cmd_write_nack(void *hwdev, u8 dest, void *cmd, u16 size)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+	struct hinic_api_cmd_chain *chain;
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+
+	if (!hwdev || !cmd || !size || size > MAX_CMD_BUF_SIZE)
+		return -EINVAL;
+
+	if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) ||
+	    hinic_get_mgmt_channel_status(hwdev))
+		return -EPERM;
+
+	pf_to_mgmt = dev->pf_to_mgmt;
+	chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_POLL_WRITE];
+
+	if (!dev->chip_present_flag)
+		return -EPERM;
+
+	return hinic_api_cmd_write(chain, dest, cmd, size);
+}
+EXPORT_SYMBOL(hinic_api_cmd_write_nack);
+
+int hinic_api_cmd_read_ack(void *hwdev, u8 dest, void *cmd, u16 size, void *ack,
+			   u16 ack_size)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+	struct hinic_api_cmd_chain *chain;
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+
+	/* an ack buffer is only required when a non-zero ack_size is given;
+	 * NOTE(review): unlike the write path, size == 0 is accepted here -
+	 * confirm that is intentional
+	 */
+	if (!hwdev || !cmd || (ack_size && !ack))
+		return -EINVAL;
+
+	if (size > MAX_CMD_BUF_SIZE)
+		return -EINVAL;
+
+	if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) ||
+	    hinic_get_mgmt_channel_status(hwdev))
+		return -EPERM;
+
+	pf_to_mgmt = dev->pf_to_mgmt;
+	chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_POLL_READ];
+
+	if (!dev->chip_present_flag)
+		return -EPERM;
+
+	return hinic_api_cmd_read(chain, dest, cmd, size, ack, ack_size);
+}
+EXPORT_SYMBOL(hinic_api_cmd_read_ack);
+
+/* Reply to a sync message from the mgmt cpu; an empty payload is padded to
+ * BUF_OUT_DEFAULT_SIZE so the response is never zero-length.
+ */
+static void __send_mgmt_ack(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+			    enum hinic_mod_type mod, u8 cmd, void *buf_in,
+			    u16 in_size, u16 msg_id)
+{
+	u16 buf_size = in_size ? in_size : BUF_OUT_DEFAULT_SIZE;
+
+	spin_lock_bh(&pf_to_mgmt->async_msg_lock);
+	/* MGMT sent sync msg, send the response */
+	send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, buf_size,
+			       HINIC_MSG_RESPONSE, msg_id);
+	spin_unlock_bh(&pf_to_mgmt->async_msg_lock);
+}
+
+/**
+ * mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @buf_in: the buffer of recv msg
+ * @in_size: the size of buffer
+ * @msg_id: message id
+ * @need_resp: the flag of need resp
+ */
+static void mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+				  enum hinic_mod_type mod, u8 cmd, void *buf_in,
+				  u16 in_size, u16 msg_id, int need_resp)
+{
+	void *dev = pf_to_mgmt->hwdev->dev_hdl;
+	void *buf_out = pf_to_mgmt->mgmt_ack_buf;
+	enum hinic_mod_type tmp_mod = mod;
+	bool ack_first = false;
+	u16 out_size = 0;
+
+	memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+	/* illegal module: skip the callback; the resp label below still
+	 * acks with an empty (zeroed, out_size == 0) response
+	 */
+	if (mod >= HINIC_MOD_HW_MAX) {
+		sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n",
+			 mod);
+		goto resp;
+	}
+
+	/* RUNNING bit lets unregister paths see an in-flight callback */
+	set_bit(HINIC_MGMT_MSG_CB_RUNNING,
+		&pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+
+	if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] ||
+	    !test_bit(HINIC_MGMT_MSG_CB_REG,
+		      &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) {
+		sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n",
+			 mod);
+		clear_bit(HINIC_MGMT_MSG_CB_RUNNING,
+			  &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+		goto resp;
+	}
+
+	ack_first = hinic_mgmt_event_ack_first(mod, cmd);
+	if (ack_first && need_resp) {
+		/* send ack to mgmt first to avoid command timeout in
+		 * mgmt(100ms in mgmt);
+		 * mgmt to host command don't need any response data from host,
+		 * just need ack from host
+		 */
+		__send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, in_size, msg_id);
+	}
+
+	pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->hwdev,
+					      pf_to_mgmt->recv_mgmt_msg_data[tmp_mod],
+					      cmd, buf_in, in_size,
+					      buf_out, &out_size);
+
+	clear_bit(HINIC_MGMT_MSG_CB_RUNNING,
+		  &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+
+resp:
+	/* ack-last path: the callback's buf_out/out_size form the reply */
+	if (!ack_first && need_resp)
+		__send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size,
+				msg_id);
+}
+
+/**
+ * mgmt_resp_msg_handler - handler for response message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ */
+static void mgmt_resp_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+				  struct hinic_recv_msg *recv_msg)
+{
+	void *dev = pf_to_mgmt->hwdev->dev_hdl;
+	bool id_match;
+
+	/* delete async msg */
+	if (recv_msg->msg_id & ASYNC_MSG_FLAG)
+		return;
+
+	spin_lock_bh(&pf_to_mgmt->sync_event_lock);
+	id_match = (recv_msg->msg_id == pf_to_mgmt->sync_msg_id);
+	if (!id_match)
+		sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) dismatch, event state: %d\n",
+			pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+			pf_to_mgmt->event_flag);
+	else if (pf_to_mgmt->event_flag == SEND_EVENT_START)
+		complete(&recv_msg->recv_done);
+	else
+		sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state: %d\n",
+			pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+			pf_to_mgmt->event_flag);
+	spin_unlock_bh(&pf_to_mgmt->sync_event_lock);
+}
+
+/* Workqueue entry: run the deferred mgmt message callback, then release the
+ * work item and its copied payload.
+ */
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
+{
+	struct hinic_mgmt_msg_handle_work *mgmt_work;
+
+	mgmt_work = container_of(work, struct hinic_mgmt_msg_handle_work,
+				 work);
+
+	/* a response is required unless mgmt flagged the message as async */
+	mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod,
+			      mgmt_work->cmd, mgmt_work->msg,
+			      mgmt_work->msg_len, mgmt_work->msg_id,
+			      !mgmt_work->async_mgmt_to_pf);
+
+	kfree(mgmt_work->msg);
+	kfree(mgmt_work);
+}
+
+/* Validate one segment of a multi-segment mgmt message: the id and length
+ * must be in range, and segments must arrive strictly in order. Segment 0
+ * always restarts the sequence. Updates recv_msg->seq_id when accepted.
+ */
+static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg,
+					  u8 seq_id, u8 seg_len)
+{
+	if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN)
+		return false;
+
+	if (seq_id == MGMT_MSG_MAX_SEQ_ID &&
+	    seg_len > MGMT_MSG_LAST_SEG_MAX_LEN)
+		return false;
+
+	if (seq_id != 0 && seq_id != recv_msg->seq_id + 1)
+		return false;
+
+	recv_msg->seq_id = seq_id;
+
+	return true;
+}
+
+/**
+ * recv_mgmt_msg_handler - handler a message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: the header of the message
+ * @recv_msg: received message details
+ */
+static void recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+				  u8 *header, struct hinic_recv_msg *recv_msg)
+{
+	struct hinic_mgmt_msg_handle_work *mgmt_work;
+	u64 mbox_header = *((u64 *)header);
+	void *msg_body = header + sizeof(mbox_header);
+	u8 seq_id, seq_len;
+	u32 offset;
+	u64 dir;
+
+	/* Don't need to get anything from hw when cmd is async */
+	dir = HINIC_MSG_HEADER_GET(mbox_header, DIRECTION);
+	if (dir == HINIC_MSG_RESPONSE &&
+	    HINIC_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG)
+		return;
+
+	seq_len = HINIC_MSG_HEADER_GET(mbox_header, SEG_LEN);
+	seq_id = HINIC_MSG_HEADER_GET(mbox_header, SEQID);
+
+	if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) {
+		sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+			"Mgmt msg sequence id and segment length check fail, front seq_id: 0x%x, current seq_id: 0x%x, seg len: 0x%x\n",
+			recv_msg->seq_id, seq_id, seq_len);
+		/* set seq_id to invalid seq_id */
+		recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+		return;
+	}
+
+	/* accumulate this segment into the reassembly buffer */
+	offset = seq_id * SEGMENT_LEN;
+	memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len);
+
+	if (!HINIC_MSG_HEADER_GET(mbox_header, LAST))
+		return;
+
+	/* message complete: capture the metadata and reset reassembly */
+	recv_msg->cmd = HINIC_MSG_HEADER_GET(mbox_header, CMD);
+	recv_msg->mod = HINIC_MSG_HEADER_GET(mbox_header, MODULE);
+	recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(mbox_header,
+							  ASYNC_MGMT_TO_PF);
+	recv_msg->msg_len = HINIC_MSG_HEADER_GET(mbox_header, MSG_LEN);
+	recv_msg->msg_id = HINIC_MSG_HEADER_GET(mbox_header, MSG_ID);
+	recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+
+	if (HINIC_MSG_HEADER_GET(mbox_header, DIRECTION) ==
+	    HINIC_MSG_RESPONSE) {
+		mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
+		return;
+	}
+
+	/* defer send-direction messages to the mgmt workqueue */
+	mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+	if (!mgmt_work)
+		return;
+
+	if (recv_msg->msg_len) {
+		mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+		if (!mgmt_work->msg) {
+			kfree(mgmt_work);
+			return;
+		}
+		/* copy only when a payload exists; previously this memcpy
+		 * ran unconditionally, passing a NULL destination for
+		 * zero-length messages (undefined behavior)
+		 */
+		memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+	}
+
+	mgmt_work->pf_to_mgmt = pf_to_mgmt;
+	mgmt_work->msg_len = recv_msg->msg_len;
+	mgmt_work->msg_id = recv_msg->msg_id;
+	mgmt_work->mod = recv_msg->mod;
+	mgmt_work->cmd = recv_msg->cmd;
+	mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+	INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+	queue_work(pf_to_mgmt->workq, &mgmt_work->work);
+}
+
+/**
+ * hinic_mgmt_msg_aeqe_handler - handler for a mgmt message event
+ * @hwdev: the pointer to hw device
+ * @header: the header of the message
+ * @size: unused
+ */
+void hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt = dev->pf_to_mgmt;
+	struct hinic_recv_msg *recv_msg;
+	u64 msg_header = *(u64 *)header;
+
+	/* direct-send messages and responses reassemble in separate buffers */
+	if (HINIC_MSG_HEADER_GET(msg_header, DIRECTION) ==
+	    HINIC_MSG_DIRECT_SEND)
+		recv_msg = &pf_to_mgmt->recv_msg_from_mgmt;
+	else
+		recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+	recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
+}
+
+/**
+ * alloc_recv_msg - allocate received message memory
+ * @recv_msg: pointer that will hold the allocated data
+ * Return: 0 - success, negative - failure
+ */
+static int alloc_recv_msg(struct hinic_recv_msg *recv_msg)
+{
+	/* start with an invalid sequence id so segment 0 must arrive first */
+	recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+
+	recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+
+	return recv_msg->msg ? 0 : -ENOMEM;
+}
+
+/**
+ * free_recv_msg - free received message memory
+ * @recv_msg: pointer that holds the allocated data
+ */
+static void free_recv_msg(struct hinic_recv_msg *recv_msg)
+{
+	/* kfree(NULL) is a no-op, so a partially set up msg is also safe */
+	kfree(recv_msg->msg);
+}
+
+/**
+ * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ * Return: 0 - success, negative - failure
+ */
+static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
+{
+	int err;
+	void *dev = pf_to_mgmt->hwdev->dev_hdl;
+
+	err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+	if (err) {
+		sdk_err(dev, "Failed to allocate recv msg\n");
+		return err;
+	}
+
+	err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+	if (err) {
+		sdk_err(dev, "Failed to allocate resp recv msg\n");
+		goto alloc_msg_for_resp_err;
+	}
+
+	pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+	if (!pf_to_mgmt->async_msg_buf)	{
+		err = -ENOMEM;
+		goto async_msg_buf_err;
+	}
+
+	pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+	if (!pf_to_mgmt->sync_msg_buf)	{
+		err = -ENOMEM;
+		goto sync_msg_buf_err;
+	}
+
+	pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+	if (!pf_to_mgmt->mgmt_ack_buf)	{
+		err = -ENOMEM;
+		goto ack_msg_buf_err;
+	}
+
+	return 0;
+
+	/* unwind in reverse allocation order on failure */
+ack_msg_buf_err:
+	kfree(pf_to_mgmt->sync_msg_buf);
+
+sync_msg_buf_err:
+	kfree(pf_to_mgmt->async_msg_buf);
+
+async_msg_buf_err:
+	free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+
+alloc_msg_for_resp_err:
+	free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+	return err;
+}
+
+/**
+ * free_msg_buf - free all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ */
+/* Release every buffer taken by alloc_msg_buf, newest first. */
+static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
+{
+	kfree(pf_to_mgmt->mgmt_ack_buf);
+	kfree(pf_to_mgmt->sync_msg_buf);
+	kfree(pf_to_mgmt->async_msg_buf);
+	free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+	free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+}
+
+/**
+ * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ */
+int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+	void *dev = hwdev->dev_hdl;
+	int err;
+
+	pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
+	if (!pf_to_mgmt)
+		return -ENOMEM;
+
+	/* publish before init: aeqe handlers find the channel via hwdev */
+	hwdev->pf_to_mgmt = pf_to_mgmt;
+	pf_to_mgmt->hwdev = hwdev;
+	spin_lock_init(&pf_to_mgmt->async_msg_lock);
+	spin_lock_init(&pf_to_mgmt->sync_event_lock);
+	sema_init(&pf_to_mgmt->sync_msg_lock, 1);
+	/* single-threaded: mgmt messages must be handled in arrival order */
+	pf_to_mgmt->workq = create_singlethread_workqueue(HINIC_MGMT_WQ_NAME);
+	if (!pf_to_mgmt->workq) {
+		sdk_err(dev, "Failed to initialize MGMT workqueue\n");
+		err = -ENOMEM;
+		goto create_mgmt_workq_err;
+	}
+
+	err = alloc_msg_buf(pf_to_mgmt);
+	if (err) {
+		sdk_err(dev, "Failed to allocate msg buffers\n");
+		goto alloc_msg_buf_err;
+	}
+
+	err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain);
+	if (err) {
+		sdk_err(dev, "Failed to init the api cmd chains\n");
+		goto api_cmd_init_err;
+	}
+
+	return 0;
+
+api_cmd_init_err:
+	free_msg_buf(pf_to_mgmt);
+
+alloc_msg_buf_err:
+	destroy_workqueue(pf_to_mgmt->workq);
+
+create_mgmt_workq_err:
+	kfree(pf_to_mgmt);
+
+	return err;
+}
+
+/**
+ * hinic_pf_to_mgmt_free - free PF to MGMT channel
+ * @hwdev: the pointer to hw device
+ */
+void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+
+	/* destroy workqueue before free related pf_to_mgmt resources in case of
+	 * illegal resource access
+	 */
+	destroy_workqueue(pf_to_mgmt->workq);
+	/* teardown mirrors hinic_pf_to_mgmt_init in reverse order */
+	hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+	free_msg_buf(pf_to_mgmt);
+	kfree(pf_to_mgmt);
+}
+
+void hinic_flush_mgmt_workq(void *hwdev)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+
+	/* drain aeq events first so no new mgmt work gets queued behind us */
+	flush_workqueue(dev->aeqs->workq);
+
+	/* VFs have no pf_to_mgmt channel */
+	if (hinic_func_type(dev) == TYPE_VF)
+		return;
+
+	if (hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED))
+		flush_workqueue(dev->pf_to_mgmt->workq);
+}
+
+/* Set up the CLP channel state: control struct, staging buffer, and the
+ * semaphore that serializes CLP exchanges.
+ */
+int hinic_clp_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+	struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt;
+
+	clp_pf_to_mgmt = kzalloc(sizeof(*clp_pf_to_mgmt), GFP_KERNEL);
+	if (!clp_pf_to_mgmt)
+		return -ENOMEM;
+
+	clp_pf_to_mgmt->clp_msg_buf =
+		kzalloc(HINIC_CLP_INPUT_BUFFER_LEN_HOST, GFP_KERNEL);
+	if (!clp_pf_to_mgmt->clp_msg_buf)
+		goto free_mgmt;
+
+	sema_init(&clp_pf_to_mgmt->clp_msg_lock, 1);
+	hwdev->clp_pf_to_mgmt = clp_pf_to_mgmt;
+
+	return 0;
+
+free_mgmt:
+	kfree(clp_pf_to_mgmt);
+	return -ENOMEM;
+}
+
+/* Tear down the CLP channel: the staging buffer goes first, then its
+ * owning control struct.
+ */
+void hinic_clp_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+	struct hinic_clp_pf_to_mgmt *clp_mgmt = hwdev->clp_pf_to_mgmt;
+
+	kfree(clp_mgmt->clp_msg_buf);
+	kfree(clp_mgmt);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h
new file mode 100644
index 000000000000..23ac9d2362a1
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_MGMT_H_
+#define HINIC_MGMT_H_
+
+#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MSG_HEADER_MODULE_SHIFT 11
+#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23
+#define HINIC_MSG_HEADER_SEQID_SHIFT 24
+#define HINIC_MSG_HEADER_LAST_SHIFT 30
+#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MSG_HEADER_CMD_SHIFT 32
+#define HINIC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48
+#define HINIC_MSG_HEADER_P2P_IDX_SHIFT 50
+#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54
+
+#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MSG_HEADER_MODULE_MASK 0x1F
+#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1
+#define HINIC_MSG_HEADER_SEQID_MASK 0x3F
+#define HINIC_MSG_HEADER_LAST_MASK 0x1
+#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MSG_HEADER_CMD_MASK 0xFF
+#define HINIC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3
+#define HINIC_MSG_HEADER_P2P_IDX_MASK 0xF
+#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF
+
+#define HINIC_MSG_HEADER_GET(val, member) \
+ (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \
+ HINIC_MSG_HEADER_##member##_MASK)
+
+#define HINIC_MSG_HEADER_SET(val, member) \
+ ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \
+ HINIC_MSG_HEADER_##member##_SHIFT)
+
+#define HINIC_MGMT_WQ_NAME "hinic_mgmt"
+
+enum clp_data_type {
+ HINIC_CLP_REQ_HOST = 0,
+ HINIC_CLP_RSP_HOST = 1
+};
+
+enum clp_reg_type {
+ HINIC_CLP_BA_HOST = 0,
+ HINIC_CLP_SIZE_HOST = 1,
+ HINIC_CLP_LEN_HOST = 2,
+ HINIC_CLP_START_REQ_HOST = 3,
+ HINIC_CLP_READY_RSP_HOST = 4
+};
+
+#define HINIC_CLP_REG_GAP 0x20
+#define HINIC_CLP_INPUT_BUFFER_LEN_HOST 4096UL
+#define HINIC_CLP_DATA_UNIT_HOST 4UL
+
+#define HINIC_BAR01_GLOABAL_CTL_OFFSET 0x4000
+#define HINIC_BAR01_CLP_OFFSET 0x5000
+
+#define HINIC_CLP_SRAM_SIZE_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x220)
+#define HINIC_CLP_REQ_SRAM_BA_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x224)
+#define HINIC_CLP_RSP_SRAM_BA_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x228)
+#define HINIC_CLP_REQ_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x22c)
+#define HINIC_CLP_RSP_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x230)
+#define HINIC_CLP_REG(member) (HINIC_CLP_##member##_REG)
+
+#define HINIC_CLP_REQ_DATA (HINIC_BAR01_CLP_OFFSET)
+#define HINIC_CLP_RSP_DATA (HINIC_BAR01_CLP_OFFSET + 0x1000)
+#define HINIC_CLP_DATA(member) (HINIC_CLP_##member##_DATA)
+
+#define HINIC_CLP_SRAM_SIZE_OFFSET 16
+#define HINIC_CLP_SRAM_BASE_OFFSET 0
+#define HINIC_CLP_LEN_OFFSET 0
+#define HINIC_CLP_START_OFFSET 31
+#define HINIC_CLP_READY_OFFSET 31
+#define HINIC_CLP_OFFSET(member) (HINIC_CLP_##member##_OFFSET)
+
+#define HINIC_CLP_SRAM_SIZE_BIT_LEN 0x7ffUL
+#define HINIC_CLP_SRAM_BASE_BIT_LEN 0x7ffffffUL
+#define HINIC_CLP_LEN_BIT_LEN 0x7ffUL
+#define HINIC_CLP_START_BIT_LEN 0x1UL
+#define HINIC_CLP_READY_BIT_LEN 0x1UL
+#define HINIC_CLP_MASK(member) (HINIC_CLP_##member##_BIT_LEN)
+
+#define HINIC_CLP_DELAY_CNT_MAX 200UL
+#define HINIC_CLP_SRAM_SIZE_REG_MAX 0x3ff
+#define HINIC_CLP_SRAM_BASE_REG_MAX 0x7ffffff
+#define HINIC_CLP_LEN_REG_MAX 0x3ff
+#define HINIC_CLP_START_OR_READY_REG_MAX 0x1
+
+enum hinic_msg_direction_type {
+ HINIC_MSG_DIRECT_SEND = 0,
+ HINIC_MSG_RESPONSE = 1
+};
+
+enum hinic_msg_segment_type {
+ NOT_LAST_SEGMENT = 0,
+ LAST_SEGMENT = 1,
+};
+
+enum hinic_mgmt_msg_type {
+ ASYNC_MGMT_MSG = 0,
+ SYNC_MGMT_MSG = 1,
+};
+
+enum hinic_msg_ack_type {
+ HINIC_MSG_ACK = 0,
+ HINIC_MSG_NO_ACK = 1,
+};
+
+/* Reassembly state and result buffer for one message from the mgmt cpu */
+struct hinic_recv_msg {
+	void *msg;	/* MAX_PF_MGMT_BUF_SIZE staging buffer */
+
+	struct completion recv_done;	/* signalled when a sync response lands */
+
+	u16 msg_len;	/* total length taken from the message header */
+	enum hinic_mod_type mod;
+	u8 cmd;
+	u8 seq_id;	/* last accepted segment; MGMT_MSG_MAX_SEQ_ID = reset */
+	u16 msg_id;
+	int async_mgmt_to_pf;	/* set when mgmt does not expect a response */
+};
+
+#define HINIC_COMM_SELF_CMD_MAX 8
+
+struct comm_up_self_msg_sub_info {
+ u8 cmd;
+ comm_up_self_msg_proc proc;
+};
+
+struct comm_up_self_msg_info {
+ u8 cmd_num;
+ struct comm_up_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX];
+};
+
+enum comm_pf_to_mgmt_event_state {
+ SEND_EVENT_UNINIT = 0,
+ SEND_EVENT_START,
+ SEND_EVENT_FAIL,
+ SEND_EVENT_TIMEOUT,
+ SEND_EVENT_END,
+};
+
+enum hinic_mgmt_msg_cb_state {
+ HINIC_MGMT_MSG_CB_REG = 0,
+ HINIC_MGMT_MSG_CB_RUNNING,
+};
+
+struct hinic_clp_pf_to_mgmt {
+ struct semaphore clp_msg_lock;
+ void *clp_msg_buf;
+};
+
+/* Per-device PF-to-MGMT channel state: buffers, locks, workqueue,
+ * api cmd chains and registered message callbacks
+ */
+struct hinic_msg_pf_to_mgmt {
+	struct hinic_hwdev *hwdev;
+
+	/* Async cmd can not be scheduling */
+	spinlock_t async_msg_lock;
+	struct semaphore sync_msg_lock;	/* serializes sync sends */
+
+	struct workqueue_struct *workq;	/* single-threaded mgmt msg handling */
+
+	void *async_msg_buf;
+	void *sync_msg_buf;
+	void *mgmt_ack_buf;	/* response buffer handed to recv callbacks */
+
+	/* separate reassembly buffers for send- and response-direction */
+	struct hinic_recv_msg recv_msg_from_mgmt;
+	struct hinic_recv_msg recv_resp_msg_from_mgmt;
+
+	u16 async_msg_id;
+	u16 sync_msg_id;	/* matched against incoming response msg_id */
+
+	struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX];
+
+	hinic_mgmt_msg_cb recv_mgmt_msg_cb[HINIC_MOD_HW_MAX];
+	void *recv_mgmt_msg_data[HINIC_MOD_HW_MAX];
+	/* REG / RUNNING bits, see enum hinic_mgmt_msg_cb_state */
+	unsigned long mgmt_msg_cb_state[HINIC_MOD_HW_MAX];
+
+	void (*async_msg_cb[HINIC_MOD_HW_MAX])(void *handle,
+					       enum hinic_mgmt_cmd cmd,
+					       void *priv_data, u32 msg_id,
+					       void *buf_out, u32 out_size);
+
+	void *async_msg_cb_data[HINIC_MOD_HW_MAX];
+
+	struct comm_up_self_msg_info proc;
+
+	/* lock when sending msg */
+	spinlock_t sync_event_lock;
+	enum comm_pf_to_mgmt_event_state event_flag;
+};
+
+/* One deferred mgmt message, queued on the mgmt workqueue; owns a private
+ * copy of the payload (msg) which the work handler frees
+ */
+struct hinic_mgmt_msg_handle_work {
+	struct work_struct work;
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+
+	void *msg;	/* kzalloc'd copy; NULL when msg_len == 0 */
+	u16 msg_len;
+
+	enum hinic_mod_type mod;
+	u8 cmd;
+	u16 msg_id;
+
+	int async_mgmt_to_pf;	/* non-zero: mgmt wants no response */
+};
+
+int hinic_pf_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size);
+
+void hinic_mgmt_msg_aeqe_handler(void *handle, u8 *header, u8 size);
+
+int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev);
+
+void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev);
+
+int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_pf_to_mgmt_async(void *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size);
+
+int hinic_pf_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ const void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+
+int hinic_clp_pf_to_mgmt_init(struct hinic_hwdev *hwdev);
+void hinic_clp_pf_to_mgmt_free(struct hinic_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h
new file mode 100644
index 000000000000..48ef0a4b0e7d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h
@@ -0,0 +1,987 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_MGMT_INTERFACE_H
+#define HINIC_MGMT_INTERFACE_H
+
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+
+#include "hinic_port_cmd.h"
+
+/* up to driver event */
+#define HINIC_PORT_CMD_MGMT_RESET 0x0
+
+struct hinic_msg_head {
+ u8 status;
+ u8 version;
+ u8 resp_aeq_num;
+ u8 rsvd0[5];
+};
+
+struct hinic_register_vf {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct hinic_tx_rate_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 tx_rate;
+};
+
+struct hinic_tx_rate_cfg_max_min {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 min_rate;
+ u32 max_rate;
+ u8 rsvd2[8];
+};
+
+struct hinic_port_mac_get {
+ u16 func_id;
+ u8 mac[ETH_ALEN];
+ int ret;
+};
+
+struct hinic_function_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rx_wqe_buf_size;
+ u32 mtu;
+};
+
+struct hinic_cmd_qpn {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 base_qpn;
+};
+
+struct hinic_port_mac_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 mac[ETH_ALEN];
+};
+
+struct hinic_port_mac_update {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct hinic_vport_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct hinic_port_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 state;
+ u8 rsvd1;
+ u16 func_id;
+};
+
+struct hinic_spoofchk_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 state;
+ u8 rsvd1;
+ u16 func_id;
+};
+
+struct hinic_mtu {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 mtu;
+};
+
+struct hinic_vlan_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+};
+
+struct hinic_speed_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 speed;
+};
+
+struct hinic_link_mode_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u16 supported; /* 0xFFFF represent Invalid value */
+ u16 advertised;
+};
+
+struct hinic_set_autoneg_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 enable; /* 1: enable , 0: disable */
+};
+
+struct hinic_set_link_status {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 enable;
+};
+
+struct hinic_get_link {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 link_status;
+ u8 rsvd1;
+};
+
+struct hinic_link_status_report {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 link_status;
+ u8 port_id;
+};
+
+struct hinic_port_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+ u8 resv2[3];
+};
+
+struct hinic_tso_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 tso_en;
+ u8 resv2[3];
+};
+
+struct hinic_lro_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ u8 lro_max_wqe_num;
+ u8 resv2[13];
+};
+
+struct hinic_lro_timer {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 type; /* 0: set timer value, 1: get timer value */
+ u8 enable; /* when set lro time, enable should be 1 */
+ u16 rsvd1;
+ u32 timer;
+};
+
+struct hinic_checksum_offload {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_csum_offload;
+};
+
+struct hinic_vlan_offload {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 vlan_rx_offload;
+ u8 rsvd1[5];
+};
+
+struct hinic_vlan_filter {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 rsvd1[2];
+ u32 vlan_filter_ctrl;
+};
+
+struct hinic_pause_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
+struct hinic_rx_mode_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_mode;
+};
+
+/* rss */
+struct nic_rss_indirect_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u8 entry[HINIC_RSS_INDIR_SIZE];
+};
+
+struct nic_rss_context_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u32 ctx;
+};
+
+struct hinic_rss_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 rss_en;
+ u8 template_id;
+ u8 rq_priority_number;
+ u8 rsvd1[3];
+ u8 prio_tc[HINIC_DCB_UP_MAX];
+};
+
+struct hinic_rss_template_mgmt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 cmd;
+ u8 template_id;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_indir_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 indir[HINIC_RSS_INDIR_SIZE];
+};
+
+struct hinic_rss_template_key {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 key[HINIC_RSS_KEY_SIZE];
+};
+
+struct hinic_rss_engine_type {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_context_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u32 context;
+};
+
+struct hinic_up_ets_cfg { /* mgmt msg: DCB ETS configuration per port */
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+ u8 rsvd1[3];
+ u8 up_tc[HINIC_DCB_UP_MAX]; /* user priority -> traffic class map */
+ u8 pg_bw[HINIC_DCB_PG_MAX]; /* per priority-group bandwidth (units TBD) */
+ u8 pgid[HINIC_DCB_UP_MAX]; /* user priority -> priority group id */
+ u8 up_bw[HINIC_DCB_UP_MAX];
+ u8 prio[HINIC_DCB_PG_MAX];
+};
+
+struct hinic_set_pfc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 pfc_en;
+ u8 pfc_bitmap;
+ u8 rsvd1[4];
+};
+
+struct hinic_set_micro_pfc {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 micro_pfc_en;
+ u8 rsvd1;
+ u8 cfg_rq_max;
+ u8 cfg_rq_depth;
+ u16 rq_sm_thd;
+};
+
+struct hinic_cos_up_map {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port_id;
+ /* every bit indicate index of map is valid or not */
+ u8 cos_valid_mask;
+ u16 rsvd1;
+
+ /* user priority in cos(index:cos, value: up) */
+ u8 map[HINIC_DCB_COS_MAX];
+};
+
+struct hinic_set_rq_iq_mapping {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 map[64];
+ u32 num_rqs;
+ u32 rq_depth;
+};
+
+#define HINIC_PFC_SET_FUNC_THD 0
+#define HINIC_PFC_SET_GLB_THD 1
+struct hinic_pfc_thd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 op_type;
+ u16 func_thd;
+ u16 glb_thd;
+};
+
+/* set iq enable to ucode */
+struct hinic_cmd_enable_iq {
+ u8 rq_depth;
+ u8 num_rq;
+ u16 glb_rq_id;
+
+ u16 q_id;
+ u16 lower_thd;
+
+ u16 force_en; /* 1: force unlock, 0: depend on condition */
+ u16 prod_idx;
+};
+
+/* set iq enable to mgmt cpu */
+struct hinic_cmd_enable_iq_mgmt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 rq_depth;
+ u8 num_rq;
+ u16 glb_rq_id;
+
+ u16 q_id;
+ u16 lower_thd;
+
+ u16 force_en; /* 1: force unlock, 0: depend on condition */
+ u16 prod_idx;
+};
+
+struct hinic_port_link_status {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 link;
+ u8 port_id;
+};
+
+struct hinic_cable_plug_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 plugged; /* 0: unplugged, 1: plugged */
+ u8 port_id;
+};
+
+struct hinic_link_err_event {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 err_type;
+ u8 port_id;
+};
+
+struct hinic_sync_time_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u64 mstime;
+};
+
+#define HINIC_PORT_STATS_VERSION 0
+
+struct hinic_port_stats_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_port_stats {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ struct hinic_phy_port_stats stats;
+};
+
+struct hinic_cmd_vport_stats {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_vport_stats stats;
+};
+
+struct hinic_vf_vlan_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u8 qos;
+ u8 rsvd1[7];
+};
+
+struct hinic_port_ipsu_mac {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 index;
+ u16 func_id;
+ u16 vlan_id;
+ u8 mac[ETH_ALEN];
+};
+
+/* get or set loopback mode, need to modify by base API */
+#define HINIC_INTERNAL_LP_MODE 5
+#define LOOP_MODE_MIN 1
+#define LOOP_MODE_MAX 6
+
+struct hinic_port_loopback {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u32 mode;
+ u32 en;
+};
+
+#define HINIC_COMPILE_TIME_LEN 20
+struct hinic_version_info {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u8 ver[HINIC_FW_VERSION_NAME];
+ u8 time[HINIC_COMPILE_TIME_LEN];
+};
+
+#define ANTI_ATTACK_DEFAULT_CIR 500000
+#define ANTI_ATTACK_DEFAULT_XIR 600000
+#define ANTI_ATTACK_DEFAULT_CBS 10000000
+#define ANTI_ATTACK_DEFAULT_XBS 12000000
+/* set physical port Anti-Attack rate */
+struct hinic_port_anti_attack_rate {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 enable; /* 1: enable rate-limiting, 0: disable rate-limiting */
+ u32 cir; /* Committed Information Rate */
+ u32 xir; /* eXtended Information Rate */
+ u32 cbs; /* Committed Burst Size */
+ u32 xbs; /* eXtended Burst Size */
+};
+
+struct hinic_clear_sq_resource {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_l2nic_reset {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 reset_flag;
+};
+
+struct hinic_super_cqe {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 super_cqe_en;
+};
+
+struct hinic_capture_info {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u32 op_type;
+ u32 func_id;
+ u32 is_en_trx;
+ u32 offset_cos;
+ u32 data_vlan;
+};
+
+struct hinic_port_rt_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 pf_id;
+ u8 enable;
+ u8 rsvd1[6];
+};
+
+struct fw_support_func {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u64 flag;
+ u64 rsvd;
+};
+
+struct hinic_vf_dcb_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_dcb_state state;
+};
+
+struct hinic_port_funcs_state {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id; /* pf_id */
+ u8 drop_en;
+ u8 rsvd1;
+};
+
+struct hinic_reset_link_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_force_pkt_drop {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 port;
+ u8 rsvd1[3];
+};
+
+struct hinic_set_link_follow {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 follow_status;
+ u8 rsvd2[3];
+};
+
+int hinic_init_function_table(void *hwdev, u16 rx_buf_sz);
+
+int hinic_get_base_qpn(void *hwdev, u16 *global_qpn);
+
+int hinic_get_fw_support_func(void *hwdev);
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev);
+
+void hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id);
+
+int hinic_set_port_routine_cmd_report(void *hwdev, bool enable);
+
+int hinic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info);
+
+int hinic_save_dcb_state(struct hinic_hwdev *hwdev,
+ struct hinic_dcb_state *dcb_state);
+
+void hinic_clear_vf_infos(void *hwdev, u16 vf_id);
+
+/* OVS module interface, for BMGW cpath command */
+enum hinic_hiovs_cmd {
+ OVS_SET_CPATH_VLAN = 39,
+ OVS_GET_CPATH_VLAN = 40,
+ OVS_DEL_CPATH_VLAN = 43,
+};
+
+struct cmd_cpath_vlan {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 vlan_id;
+ u16 pf_id;
+};
+
+/* HILINK module interface */
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hinic_hilink_cmd {
+ HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
+};
+
+enum hilink_info_print_event {
+ HILINK_EVENT_LINK_UP = 1,
+ HILINK_EVENT_LINK_DOWN,
+ HILINK_EVENT_CABLE_PLUGGED,
+ HILINK_EVENT_MAX_TYPE,
+};
+
+enum hinic_link_port_type {
+ LINK_PORT_FIBRE = 1,
+ LINK_PORT_ELECTRIC,
+ LINK_PORT_COPPER,
+ LINK_PORT_AOC,
+ LINK_PORT_BACKPLANE,
+ LINK_PORT_BASET,
+ LINK_PORT_MAX_TYPE,
+};
+
+enum hilink_fibre_subtype {
+ FIBRE_SUBTYPE_SR = 1,
+ FIBRE_SUBTYPE_LR,
+ FIBRE_SUBTYPE_MAX,
+};
+
+enum hilink_fec_type {
+ HILINK_FEC_RSFEC,
+ HILINK_FEC_BASEFEC,
+ HILINK_FEC_NOFEC,
+ HILINK_FEC_MAX_TYPE,
+};
+
+struct hi30_ffe_data {
+ u8 PRE2;
+ u8 PRE1;
+ u8 POST1;
+ u8 POST2;
+ u8 MAIN;
+};
+
+struct hi30_ctle_data {
+ u8 ctlebst[3];
+ u8 ctlecmband[3];
+ u8 ctlermband[3];
+ u8 ctleza[3];
+ u8 ctlesqh[3];
+ u8 ctleactgn[3];
+ u8 ctlepassgn;
+};
+
+struct hi30_dfe_data {
+ u8 fix_tap1_cen;
+ u8 fix_tap1_edg;
+ u8 dfefxtap[6];
+ u8 dfefloattap[6];
+};
+
+struct hilink_sfp_power {
+ u32 rx_power;
+ u32 tx_power;
+ u32 rsvd;
+ u32 is_invalid;
+};
+
+#define HILINK_MAX_LANE 4
+
+struct hilink_lane {
+ u8 lane_used;
+ u8 hi30_ffe[5];
+ u8 hi30_ctle[19];
+ u8 hi30_dfe[14];
+ u8 rsvd4;
+};
+
+struct hinic_link_info {
+ u8 vendor_name[16];
+ /* port type:
+ * 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane;
+ * 6 - baseT; 0xffff - unknown
+ *
+ * port subtype:
+ * Only when port_type is fiber:
+ * 1 - SR; 2 - LR
+ */
+ u32 port_type;
+ u32 port_sub_type;
+ u32 cable_length;
+ u8 cable_temp;
+ u8 cable_max_speed;/* 1(G)/10(G)/25(G)... */
+ u8 sfp_type; /* 0 - qsfp; 1 - sfp */
+ u8 rsvd0;
+ u32 power[4]; /* uW; if is sfp, only power[2] is valid */
+
+ u8 an_state; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u16 speed; /* 1(G)/10(G)/25(G)... */
+
+ u8 cable_absent; /* 0 - cable present; 1 - cable unpresent */
+ u8 alos; /* 0 - yes; 1 - no */
+ u8 rx_los; /* 0 - yes; 1 - no */
+ u8 pma_status;
+ u32 pma_dbg_info_reg; /* pma debug info: */
+ u32 pma_signal_ok_reg; /* signal ok: */
+
+ u32 pcs_err_blk_cnt_reg; /* error block counter: */
+ u32 rf_lf_status_reg; /* RF/LF status: */
+ u8 pcs_link_reg; /* pcs link: */
+ u8 mac_link_reg; /* mac link: */
+ u8 mac_tx_en;
+ u8 mac_rx_en;
+ u32 pcs_err_cnt;
+
+ /* struct hinic_hilink_lane: 40 bytes */
+ u8 lane1[40]; /* 25GE lane in old firmware */
+
+ u8 rsvd1[266]; /* hilink machine state */
+
+ u8 lane2[HILINK_MAX_LANE * 40]; /* max 4 lane for 40GE/100GE */
+
+ u8 rsvd2[2];
+};
+
+struct hinic_hilink_link_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 port_id;
+ u8 info_type; /* 1: link up 2: link down 3 cable plugged */
+ u8 rsvd1;
+
+ struct hinic_link_info info;
+
+ u8 rsvd2[352];
+};
+
+struct hinic_link_ksettings_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+
+ u32 valid_bitmap;
+ u32 speed; /* enum nic_speed_level */
+ u8 autoneg; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u8 rsvd2[18]; /* reserved for duplex, port, etc. */
+};
+
+enum hinic_tx_promsic {
+ HINIC_TX_PROMISC_ENABLE = 0,
+ HINIC_TX_PROMISC_DISABLE = 1,
+};
+
+struct hinic_promsic_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+ u16 func_id;
+ u8 cfg;
+ u8 rsvd1;
+};
+
+struct hinic_netq_cfg_msg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 netq_en;
+ u8 rsvd;
+};
+
+/* add/del rxq filter msg */
+struct hinic_rq_filter_msg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 qid;
+ u8 filter_type;
+ u8 qflag;/*0:stdq, 1:defq, 2: netq*/
+
+ u8 mac[6];
+ struct {
+ u8 inner_mac[6];
+ u32 vni;
+ } vxlan;
+};
+
+int hinic_get_hilink_link_info(void *hwdev, struct hinic_link_info *info);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c
new file mode 100644
index 000000000000..350d10e4997f
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwif.h"
+#include "hinic_csr.h"
+#include "hinic_msix_attr.h"
+
+#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
+
+/**
+ * hinic_msix_attr_set - set message attribute of msix entry
+ * @hwif: the hardware interface of a pci function device
+ * @msix_index: msix_index
+ * @pending_limit: the maximum pending interrupt events (unit 8)
+ * @coalesc_timer: coalesc period for interrupt (unit 8 us)
+ * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
+ * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
+ * @resend_timer: maximum wait for resending msix message
+ * (unit coalesc period)
+ * Return: 0 - success, negative - failure
+ */
+int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
+ u8 pending_limit, u8 coalesc_timer,
+ u8 lli_timer_cfg, u8 lli_credit_limit,
+ u8 resend_timer)
+{
+ u32 msix_ctrl, addr;
+
+ if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) /* reject out-of-range entry */
+ return -EINVAL;
+
+ msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) | /* pack all fields into one 32-bit word */
+ HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) |
+ HINIC_MSIX_ATTR_SET(lli_timer_cfg, LLI_TIMER) |
+ HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) |
+ HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER);
+
+ addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); /* per-entry control CSR */
+
+ hinic_hwif_write_reg(hwif, addr, msix_ctrl);
+
+ return 0;
+}
+
+/**
+ * hinic_msix_attr_get - get message attribute of msix entry
+ * @hwif: the hardware interface of a pci function device
+ * @msix_index: msix_index
+ * @pending_limit: the maximum pending interrupt events (unit 8)
+ * @coalesc_timer_cfg: coalesc period for interrupt (unit 8 us)
+ * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
+ * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
+ * @resend_timer_cfg: maximum wait for resending msix message
+ * (unit coalesc period)
+ * Return: 0 - success, negative - failure
+ */
+int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
+ u8 *pending_limit, u8 *coalesc_timer_cfg,
+ u8 *lli_timer_cfg, u8 *lli_credit_limit,
+ u8 *resend_timer_cfg)
+{
+ u32 addr, val;
+
+ if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) /* reject out-of-range entry */
+ return -EINVAL;
+
+ addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); /* same CSR written by hinic_msix_attr_set() */
+ val = hinic_hwif_read_reg(hwif, addr);
+
+ *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT); /* unpack each field from the control word */
+ *coalesc_timer_cfg = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER);
+ *lli_timer_cfg = HINIC_MSIX_ATTR_GET(val, LLI_TIMER);
+ *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT);
+ *resend_timer_cfg = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER);
+
+ return 0;
+}
+
+/**
+ * hinic_msix_attr_cnt_set - set message attribute counters of msix entry
+ * @hwif: the hardware interface of a pci function device
+ * @msix_index: msix_index
+ * @lli_timer_cnt: replenishing period for low latency interrupt (unit 8 us)
+ * @lli_credit_cnt: maximum credits for low latency msix messages (unit 8)
+ * @coalesc_timer_cnt: coalesc period for interrupt (unit 8 us)
+ * @pending_cnt: the maximum pending interrupt events (unit 8)
+ * @resend_timer_cnt: maximum wait for resending msix message
+ * (unit coalesc period)
+ * Return: 0 - success, negative - failure
+ */
+int hinic_msix_attr_cnt_set(struct hinic_hwif *hwif, u16 msix_index,
+ u8 lli_timer_cnt, u8 lli_credit_cnt,
+ u8 coalesc_timer_cnt, u8 pending_cnt,
+ u8 resend_timer_cnt)
+{
+ u32 msix_ctrl, addr;
+
+ if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) /* reject out-of-range entry */
+ return -EINVAL;
+
+ msix_ctrl = HINIC_MSIX_CNT_SET(lli_timer_cnt, LLI_TIMER) | /* pack counter fields into one word */
+ HINIC_MSIX_CNT_SET(lli_credit_cnt, LLI_CREDIT) |
+ HINIC_MSIX_CNT_SET(coalesc_timer_cnt, COALESC_TIMER) |
+ HINIC_MSIX_CNT_SET(pending_cnt, PENDING) |
+ HINIC_MSIX_CNT_SET(resend_timer_cnt, RESEND_TIMER);
+
+ addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index); /* counter CSR, distinct from the CTRL CSR */
+
+ hinic_hwif_write_reg(hwif, addr, msix_ctrl);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h
new file mode 100644
index 000000000000..0397dfb41fca
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_MSIX_ATTR_H_
+#define HINIC_MSIX_ATTR_H_
+
+#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0
+#define HINIC_MSIX_COALESC_TIMER_SHIFT 8
+#define HINIC_MSIX_LLI_TIMER_SHIFT 16
+#define HINIC_MSIX_LLI_CREDIT_SHIFT 24
+#define HINIC_MSIX_RESEND_TIMER_SHIFT 29
+
+#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFFU
+#define HINIC_MSIX_COALESC_TIMER_MASK 0xFFU
+#define HINIC_MSIX_LLI_TIMER_MASK 0xFFU
+#define HINIC_MSIX_LLI_CREDIT_MASK 0x1FU
+#define HINIC_MSIX_RESEND_TIMER_MASK 0x7U
+
+#define HINIC_MSIX_ATTR_GET(val, member) \
+ (((val) >> HINIC_MSIX_##member##_SHIFT) \
+ & HINIC_MSIX_##member##_MASK)
+
+#define HINIC_MSIX_ATTR_SET(val, member) \
+ (((val) & HINIC_MSIX_##member##_MASK) \
+ << HINIC_MSIX_##member##_SHIFT)
+
+#define HINIC_MSIX_CNT_LLI_TIMER_SHIFT 0
+#define HINIC_MSIX_CNT_LLI_CREDIT_SHIFT 8 /* NOTE(review): LLI_CREDIT, COALESC_TIMER and PENDING all use shift 8 */
+#define HINIC_MSIX_CNT_COALESC_TIMER_SHIFT 8 /* -> the three fields overlap in the CNT register; confirm vs HW spec */
+#define HINIC_MSIX_CNT_PENDING_SHIFT 8
+#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
+
+#define HINIC_MSIX_CNT_LLI_TIMER_MASK 0xFFU
+#define HINIC_MSIX_CNT_LLI_CREDIT_MASK 0xFFU
+#define HINIC_MSIX_CNT_COALESC_TIMER_MASK 0xFFU
+#define HINIC_MSIX_CNT_PENDING_MASK 0x1FU /* NOTE(review): narrower than ATTR PENDING_LIMIT mask (0xFF) - confirm */
+#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x7U
+
+#define HINIC_MSIX_CNT_SET(val, member) \
+ (((val) & HINIC_MSIX_CNT_##member##_MASK) << \
+ HINIC_MSIX_CNT_##member##_SHIFT)
+
+int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
+ u8 pending_limit, u8 coalesc_timer,
+ u8 lli_timer_cfg, u8 lli_credit_limit,
+ u8 resend_timer);
+
+int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
+ u8 *pending_limit, u8 *coalesc_timer_cfg,
+ u8 *lli_timer_cfg, u8 *lli_credit_limit,
+ u8 *resend_timer_cfg);
+
+int hinic_msix_attr_cnt_set(struct hinic_hwif *hwif, u16 msix_index,
+ u8 lli_timer_cnt, u8 lli_credit_cnt,
+ u8 coalesc_timer_cnt, u8 pending_cnt,
+ u8 resend_timer_cnt);
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c
new file mode 100644
index 000000000000..25cd46cba842
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c
@@ -0,0 +1,972 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwdev.h"
+#include "hinic_csr.h"
+#include "hinic_hwif.h"
+#include "hinic_nic_io.h"
+#include "hinic_api_cmd.h"
+#include "hinic_mgmt.h"
+#include "hinic_mbox.h"
+#include "hinic_nic_cfg.h"
+/* duplicate #include "hinic_hwif.h" removed; first include above suffices */
+#include "hinic_mgmt_interface.h"
+#include "hinic_multi_host_mgmt.h"
+
+#define SLAVE_HOST_STATUS_CLEAR(host_id, val) \
+ ((val) & (~(1U << (host_id))))
+#define SLAVE_HOST_STATUS_SET(host_id, enable) \
+ (((u8)(enable) & 1U) << (host_id))
+#define SLAVE_HOST_STATUS_GET(host_id, val) (!!((val) & (1U << (host_id)))) /* one enable bit per host id */
+
+#define MULTI_HOST_PPF_GET(host_id, val) (((val) >> ((host_id) * 4 + 16)) & 0xf) /* 4-bit PPF index per host, packed from bit 16 */
+
+static inline u8 get_master_host_ppf_idx(struct hinic_hwdev *hwdev) /* read the master host's PPF index from the slave-status CSR */
+{
+ u32 reg_val;
+
+ reg_val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_MULT_HOST_SLAVE_STATUS_ADDR);
+ /* master host sets host_id to 0 */
+ return MULTI_HOST_PPF_GET(0, reg_val);
+}
+
+void set_slave_host_enable(struct hinic_hwdev *hwdev, u8 host_id, bool enable) /* publish this slave host's enable bit in the shared status CSR */
+{
+ u32 reg_val;
+
+ if (HINIC_FUNC_TYPE(hwdev) != TYPE_PPF) /* only the PPF may touch this register */
+ return;
+
+ reg_val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_MULT_HOST_SLAVE_STATUS_ADDR);
+
+ reg_val = SLAVE_HOST_STATUS_CLEAR(host_id, reg_val); /* read-modify-write this host's bit only */
+ reg_val |= SLAVE_HOST_STATUS_SET(host_id, enable);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_MULT_HOST_SLAVE_STATUS_ADDR,
+ reg_val);
+
+ sdk_info(hwdev->dev_hdl, "Set slave host %d status %d, reg value: 0x%x\n",
+ host_id, enable, reg_val);
+}
+
+bool hinic_get_slave_host_enable(void *hwdev, u8 host_id) /* query a slave host's enable bit; false for non-PPF callers */
+{
+ u32 reg_val;
+ struct hinic_hwdev *dev = hwdev;
+
+ if (HINIC_FUNC_TYPE(dev) != TYPE_PPF) /* register is only meaningful on the PPF */
+ return false;
+
+ reg_val = hinic_hwif_read_reg(dev->hwif,
+ HINIC_MULT_HOST_SLAVE_STATUS_ADDR);
+
+ return SLAVE_HOST_STATUS_GET(host_id, reg_val);
+}
+EXPORT_SYMBOL(hinic_get_slave_host_enable);
+
+void set_master_host_mbox_enable(struct hinic_hwdev *hwdev, bool enable) /* master PPF: advertise mailbox readiness in HOST_MODE CSR */
+{
+ u32 reg_val;
+
+ if (!IS_MASTER_HOST(hwdev) || HINIC_FUNC_TYPE(hwdev) != TYPE_PPF) /* only the master host's PPF owns this bit */
+ return;
+
+ reg_val = hinic_hwif_read_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR);
+ reg_val = MULTI_HOST_REG_CLEAR(reg_val, MASTER_MBX_STS); /* read-modify-write the MASTER_MBX_STS field */
+ reg_val |= MULTI_HOST_REG_SET((u8)enable, MASTER_MBX_STS);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR, reg_val);
+
+ sdk_info(hwdev->dev_hdl, "multi-host status %d, reg value: 0x%x\n",
+ enable, reg_val);
+}
+
+bool hinic_get_master_host_mbox_enable(void *hwdev) /* slave side: check whether the master host's mailbox is up */
+{
+ u32 reg_val;
+ struct hinic_hwdev *dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ if (!IS_SLAVE_HOST(dev) || HINIC_FUNC_TYPE(dev) == TYPE_VF) /* non-slave hosts and VFs never wait on the master */
+ return true;
+
+ reg_val = hinic_hwif_read_reg(dev->hwif, HINIC_HOST_MODE_ADDR);
+
+ return !!MULTI_HOST_REG_GET(reg_val, MASTER_MBX_STS);
+}
+
+void set_func_host_mode(struct hinic_hwdev *hwdev, enum hinic_func_mode mode) /* record the host role and its matching feature capability set */
+{
+ switch (mode) {
+ case FUNC_MOD_MULTI_BM_MASTER:
+ sdk_info(hwdev->dev_hdl, "Detect multi-host BM master host\n");
+ hwdev->func_mode = FUNC_MOD_MULTI_BM_MASTER;
+ hwdev->feature_cap = HINIC_MULTI_BM_MASTER;
+ break;
+ case FUNC_MOD_MULTI_BM_SLAVE:
+ sdk_info(hwdev->dev_hdl, "Detect multi-host BM slave host\n");
+ hwdev->func_mode = FUNC_MOD_MULTI_BM_SLAVE;
+ hwdev->feature_cap = HINIC_MULTI_BM_SLAVE;
+ break;
+ case FUNC_MOD_MULTI_VM_MASTER:
+ sdk_info(hwdev->dev_hdl, "Detect multi-host VM master host\n");
+ hwdev->func_mode = FUNC_MOD_MULTI_VM_MASTER;
+ hwdev->feature_cap = HINIC_MULTI_VM_MASTER;
+ break;
+ case FUNC_MOD_MULTI_VM_SLAVE:
+ sdk_info(hwdev->dev_hdl, "Detect multi-host VM slave host\n");
+ hwdev->func_mode = FUNC_MOD_MULTI_VM_SLAVE;
+ hwdev->feature_cap = HINIC_MULTI_VM_SLAVE;
+ break;
+ default: /* anything unrecognized degrades to a normal single-host setup */
+ hwdev->func_mode = FUNC_MOD_NORMAL_HOST;
+ hwdev->feature_cap = HINIC_NORMAL_HOST_CAP;
+ break;
+ }
+}
+
+bool is_multi_vm_slave(void *hwdev) /* true when this device runs as a multi-host VM slave */
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ return hw_dev->func_mode == FUNC_MOD_MULTI_VM_SLAVE;
+}
+
+bool is_multi_bm_slave(void *hwdev) /* true when this device runs as a multi-host BM slave */
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+
+ if (!hwdev)
+ return false;
+
+ return hw_dev->func_mode == FUNC_MOD_MULTI_BM_SLAVE;
+}
+
+int rectify_host_mode(struct hinic_hwdev *hwdev) /* refine host role using the SDI mode reported by firmware */
+{
+ u16 cur_sdi_mode;
+ int err;
+
+ if (hwdev->board_info.board_type != /* only this multi-host 25GE board needs rectification */
+ HINIC_BOARD_TYPE_MULTI_HOST_ETH_25GE)
+ return 0;
+
+ sdk_info(hwdev->dev_hdl, "Rectify host mode, host_id: %d\n",
+ hinic_pcie_itf_id(hwdev));
+
+ err = hinic_get_sdi_mode(hwdev, &cur_sdi_mode);
+ if (err == HINIC_MGMT_CMD_UNSUPPORTED) /* firmware cannot report the mode: assume BM */
+ cur_sdi_mode = HINIC_SDI_MODE_BM;
+ else if (err)
+ return err;
+
+ switch (cur_sdi_mode) {
+ case HINIC_SDI_MODE_BM:
+ if (hinic_pcie_itf_id(hwdev) == 0) /* PCIe interface 0 is the master host */
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_MASTER);
+ else
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_SLAVE);
+ break;
+ case HINIC_SDI_MODE_VM:
+ if (hinic_pcie_itf_id(hwdev) == 0)
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_MASTER);
+ else
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_SLAVE);
+ break;
+ default: /* keep the previously detected mode on unknown values */
+ sdk_warn(hwdev->dev_hdl, "Unknown sdi mode %d\n", cur_sdi_mode);
+ break;
+ }
+
+ return 0;
+}
+
+void detect_host_mode_pre(struct hinic_hwdev *hwdev) /* early host-role detection from the HOST_MODE CSR */
+{
+ enum hinic_chip_mode chip_mode;
+
+ /* Any PF can write the HOST_MODE register, so it cannot be trusted for
+ * host 0; host 0 gets its chip mode from the mgmt CPU later. VFs have
+ * no permission to read HOST_MODE, so their mode comes from board info.
+ */
+ if (hinic_pcie_itf_id(hwdev) == 0 ||
+ HINIC_FUNC_TYPE(hwdev) == TYPE_VF) {
+ set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST);
+ return;
+ }
+
+ chip_mode = hinic_hwif_read_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR);
+ switch (MULTI_HOST_REG_GET(chip_mode, CHIP_MODE)) {
+ case CHIP_MODE_VMGW:
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_SLAVE);
+ /* mbox has not initialized, set slave host disable */
+ set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false);
+ break;
+ case CHIP_MODE_BMGW:
+ set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_SLAVE);
+ /* mbox has not initialized, set slave host disable */
+ set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false);
+ break;
+
+ default:
+ set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST);
+ break;
+ }
+}
+
+int __mbox_to_host(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, /* route a mailbox message to the peer host's PPF */
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout,
+ enum hinic_mbox_ack_type ack_type)
+{
+ struct hinic_hwdev *mbox_hwdev = hwdev;
+ u8 dst_host_func_idx;
+ int err;
+
+ if (!IS_MULTI_HOST(hwdev) || HINIC_IS_VF(hwdev)) /* cross-host mailbox only exists for PF/PPF on multi-host chips */
+ return -EPERM;
+
+ if (hinic_func_type(hwdev) == TYPE_PF) {
+ down(&hwdev->ppf_sem); /* hold ppf_sem while borrowing the PPF's hwdev */
+ mbox_hwdev = hwdev->ppf_hwdev;
+ if (!mbox_hwdev) {
+ err = -EINVAL;
+ goto release_lock;
+ }
+
+ if (!hinic_is_hwdev_mod_inited(mbox_hwdev,
+ HINIC_HWDEV_MBOX_INITED)) {
+ err = -EPERM;
+ goto release_lock;
+ }
+ }
+
+ if (!mbox_hwdev->chip_present_flag) { /* chip may have been hot-removed */
+ err = -EPERM;
+ goto release_lock;
+ }
+
+ if (!hinic_get_master_host_mbox_enable(hwdev)) {
+ sdk_err(hwdev->dev_hdl, "Master host not initialized\n");
+ err = -EFAULT;
+ goto release_lock;
+ }
+
+ if (!mbox_hwdev->mhost_mgmt) {
+ /* send to master host in default */
+ dst_host_func_idx = get_master_host_ppf_idx(hwdev);
+ } else {
+ dst_host_func_idx = IS_MASTER_HOST(hwdev) ? /* master targets the slave PPF and vice versa */
+ mbox_hwdev->mhost_mgmt->shost_ppf_idx :
+ mbox_hwdev->mhost_mgmt->mhost_ppf_idx;
+ }
+
+ if (ack_type == MBOX_ACK)
+ err = hinic_mbox_to_host(mbox_hwdev, dst_host_func_idx,
+ mod, cmd, buf_in, in_size,
+ buf_out, out_size,
+ timeout);
+ else
+ err = hinic_mbox_to_func_no_ack(mbox_hwdev, dst_host_func_idx,
+ mod, cmd, buf_in, in_size);
+
+release_lock:
+ if (hinic_func_type(hwdev) == TYPE_PF) /* release only if taken above */
+ up(&hwdev->ppf_sem);
+
+ return err;
+}
+
+int hinic_mbox_to_host_sync(void *hwdev, enum hinic_mod_type mod, /* synchronous cross-host mailbox (waits for the reply) */
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ if (!hwdev)
+ return -EINVAL;
+
+ return __mbox_to_host((struct hinic_hwdev *)hwdev, mod, cmd, buf_in,
+ in_size, buf_out, out_size, timeout, MBOX_ACK);
+}
+EXPORT_SYMBOL(hinic_mbox_to_host_sync);
+
+int hinic_mbox_to_host_no_ack(struct hinic_hwdev *hwdev, /* fire-and-forget variant: no reply buffer, no timeout */
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size)
+{
+ return __mbox_to_host(hwdev, mod, cmd, buf_in, in_size, NULL, NULL,
+ 0, MBOX_NO_ACK);
+}
+
+static int __get_func_nic_state_from_pf(struct hinic_hwdev *hwdev,
+ u16 glb_func_idx, u8 *en);
+
+int sw_func_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in, /* PF-side handler for VF "sw func" mailbox commands */
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hwdev = handle;
+ struct hinic_slave_func_nic_state *nic_state, *out_state;
+ int err;
+
+ switch (cmd) {
+ case HINIC_SW_CMD_GET_SLAVE_FUNC_NIC_STATE:
+ nic_state = buf_in;
+ out_state = buf_out;
+ *out_size = sizeof(*nic_state);
+
+ /* find nic state in ppf func_nic_en bitmap */
+ err = __get_func_nic_state_from_pf(hwdev, nic_state->func_idx,
+ &out_state->enable);
+ if (err)
+ out_state->status = 1; /* non-zero status tells the VF the lookup failed */
+ else
+ out_state->status = 0;
+
+ break;
+ default: /* NOTE(review): unknown cmds return 0 with *out_size untouched - confirm callers tolerate this */
+ break;
+ }
+
+ return 0;
+}
+
+static int __master_host_sw_func_handler(struct hinic_hwdev *hwdev, u16 pf_idx, /* master host: handle slave-PPF register/unregister */
+ u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt;
+ struct register_slave_host *slave_host, *out_shost;
+ int err = 0;
+
+ if (!mhost_mgmt) /* multi-host mgmt not initialized yet */
+ return -ENXIO;
+
+ switch (cmd) {
+ case HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER:
+ slave_host = buf_in;
+ out_shost = buf_out;
+ *out_size = sizeof(*slave_host);
+ mhost_mgmt->shost_registered = true; /* remember the slave's identity for later routing */
+ mhost_mgmt->shost_host_idx = slave_host->host_id;
+ mhost_mgmt->shost_ppf_idx = slave_host->ppf_idx;
+
+ bitmap_copy((ulong *)out_shost->funcs_nic_en, /* hand the current per-function nic-enable bitmap back to the slave */
+ mhost_mgmt->func_nic_en, HINIC_MAX_FUNCTIONS);
+ sdk_info(hwdev->dev_hdl, "slave host register ppf, host_id: %d, ppf_idx: %d\n",
+ slave_host->host_id, slave_host->ppf_idx);
+
+ out_shost->status = 0;
+ break;
+ case HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER:
+ slave_host = buf_in;
+ mhost_mgmt->shost_registered = false;
+ sdk_info(hwdev->dev_hdl, "slave host unregister ppf, host_id: %d, ppf_idx: %d\n",
+ slave_host->host_id, slave_host->ppf_idx);
+
+ *out_size = sizeof(*slave_host);
+ ((struct register_slave_host *)buf_out)->status = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int __event_set_func_nic_state(struct hinic_hwdev *hwdev, void *buf_in, /* forward a nic-state change to the registered event callback */
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_event_info event_info = {0};
+ struct hinic_mhost_nic_func_state nic_state = {0};
+ struct hinic_slave_func_nic_state *out_state, *func_nic_state = buf_in;
+
+ event_info.type = HINIC_EVENT_MULTI_HOST_MGMT;
+ event_info.mhost_mgmt.sub_cmd = HINIC_MHOST_NIC_STATE_CHANGE;
+ event_info.mhost_mgmt.data = &nic_state;
+
+ nic_state.func_idx = func_nic_state->func_idx;
+ nic_state.enable = func_nic_state->enable;
+
+ if (!hwdev->event_callback) /* nobody registered to consume the event */
+ return -EFAULT;
+
+ hwdev->event_callback(hwdev->event_pri_handle, &event_info); /* callback is expected to fill nic_state.status */
+
+ *out_size = sizeof(*out_state);
+ out_state = buf_out;
+ out_state->status = nic_state.status;
+
+ return nic_state.status;
+}
+
+static int multi_host_event_handler(struct hinic_hwdev *hwdev, /* dispatch multi-host mgmt commands to event helpers */
+ u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ int err;
+
+ switch (cmd) {
+ case HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE:
+ err = __event_set_func_nic_state(hwdev, buf_in, in_size,
+ buf_out, out_size);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int sw_fwd_msg_to_vf(struct hinic_hwdev *hwdev, /* strip the fwd header and relay the payload to the target VF */
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_host_fwd_head *fwd_head;
+ u16 fwd_head_len;
+ void *msg;
+ int err;
+
+ fwd_head = buf_in;
+ fwd_head_len = sizeof(struct hinic_host_fwd_head);
+ msg = (void *)((u8 *)buf_in + fwd_head_len); /* payload follows the header in the same buffer */
+ err = hinic_mbox_ppf_to_vf(hwdev, fwd_head->mod, /* NOTE(review): assumes in_size >= fwd_head_len - confirm caller validates */
+ fwd_head->dst_glb_func_idx, fwd_head->cmd,
+ msg, in_size - fwd_head_len,
+ buf_out, out_size, 0);
+ if (err)
+ nic_err(hwdev->dev_hdl,
+ "Fwd msg to func %u failed, err: %d\n",
+ fwd_head->dst_glb_func_idx, err);
+
+ return err;
+}
+
+static int __slave_host_sw_func_handler(struct hinic_hwdev *hwdev, u16 pf_idx, /* slave host: handle sw-channel commands from the master */
+ u8 cmd, void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt;
+ struct hinic_slave_func_nic_state *nic_state;
+ int err = 0;
+
+ if (!mhost_mgmt) /* multi-host mgmt not initialized yet */
+ return -ENXIO;
+
+ switch (cmd) {
+ case HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE:
+ nic_state = buf_in;
+
+ *out_size = sizeof(*nic_state);
+ ((struct hinic_slave_func_nic_state *)buf_out)->status = 0;
+
+ sdk_info(hwdev->dev_hdl, "slave func %d %s nic\n",
+ nic_state->func_idx,
+ nic_state->enable ? "register" : "unregister");
+
+ if (nic_state->enable) /* mirror the state into the local per-function bitmap */
+ set_bit(nic_state->func_idx, mhost_mgmt->func_nic_en);
+ else
+ clear_bit(nic_state->func_idx, mhost_mgmt->func_nic_en);
+
+ multi_host_event_handler(hwdev, cmd, buf_in, in_size, buf_out, /* NOTE(review): return value ignored; status already set to 0 above - confirm intended */
+ out_size);
+
+ break;
+
+ case HINIC_SW_CMD_SEND_MSG_TO_VF:
+ err = sw_fwd_msg_to_vf(hwdev, buf_in, in_size,
+ buf_out, out_size);
+ break;
+
+ case HINIC_SW_CMD_MIGRATE_READY:
+ hinic_migrate_report(hwdev);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+/* PPF mailbox entry point for HINIC_MOD_SW_FUNC messages: route the
+ * command to the master-host or slave-host handler depending on the
+ * device's host mode; any other mode is rejected.
+ */
+int sw_func_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+			     void *buf_in, u16 in_size, void *buf_out,
+			     u16 *out_size)
+{
+	struct hinic_hwdev *hwdev = handle;
+	int err = -EINVAL;
+
+	if (IS_MASTER_HOST(hwdev))
+		err = __master_host_sw_func_handler(hwdev, pf_idx, cmd, buf_in,
+						    in_size, buf_out, out_size);
+	else if (IS_SLAVE_HOST(hwdev))
+		err = __slave_host_sw_func_handler(hwdev, pf_idx, cmd, buf_in,
+						   in_size, buf_out, out_size);
+
+	if (err)
+		sdk_err(hwdev->dev_hdl, "PPF process sw funcs cmd %d failed, err: %d\n",
+			cmd, err);
+
+	return err;
+}
+
+/* Deliver a PPF mailbox message toward its final receiver.
+ *
+ * Slave host: tunnel the message to the master host PPF over the
+ * inter-host mailbox. Master host: send it to the management CPU;
+ * the FLR start command is fired without waiting for an ack, all
+ * other commands are synchronous. Any other host mode is rejected.
+ */
+int __ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id,
+			   enum hinic_mod_type mod, u8 cmd, void *buf_in,
+			   u16 in_size, void *buf_out, u16 *out_size)
+{
+	int err;
+
+	if (IS_SLAVE_HOST(hwdev)) {
+		err = hinic_mbox_to_host_sync(hwdev, mod, cmd,
+					      buf_in, in_size, buf_out,
+					      out_size, 0);
+		if (err)
+			sdk_err(hwdev->dev_hdl, "send to mpf failed, err: %d\n",
+				err);
+	} else if (IS_MASTER_HOST(hwdev)) {
+		if (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_START_FLR)
+			err = hinic_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in,
+						      in_size);
+		else
+			err = hinic_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in,
+							in_size, buf_out,
+							out_size, 0U);
+		/* busy-activating-firmware errors are expected and not logged */
+		if (err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+		    err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+			sdk_err(hwdev->dev_hdl, "PF mbox common callback handler err: %d\n",
+				err);
+	} else {
+		/* not support */
+		err = -EFAULT;
+	}
+
+	return err;
+}
+
+/* Gate a PPF mailbox message on the host mode before delivery.
+ *
+ * NOTE(review): same_host is currently hard-coded — false for master
+ * modes (message is always forwarded) and set to true for slave modes
+ * (message is always processed). The comment below says it should
+ * eventually be derived from hinic_get_hw_pf_infos; the dead warn
+ * branches are kept for that future change.
+ */
+int hinic_ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id,
+			       enum hinic_mod_type mod, u8 cmd, void *buf_in,
+			       u16 in_size, void *buf_out, u16 *out_size)
+{
+	bool same_host = false;
+	int err = -EFAULT;
+
+	/* modify same_host according to hinic_get_hw_pf_infos */
+
+	switch (hwdev->func_mode) {
+	case FUNC_MOD_MULTI_VM_MASTER:
+	case FUNC_MOD_MULTI_BM_MASTER:
+		if (!same_host)
+			err = __ppf_process_mbox_msg(hwdev, pf_idx, vf_id,
+						     mod, cmd, buf_in, in_size,
+						     buf_out, out_size);
+		else
+			sdk_warn(hwdev->dev_hdl, "Don't support ppf mbox message in BM master\n");
+
+		break;
+	case FUNC_MOD_MULTI_VM_SLAVE:
+	case FUNC_MOD_MULTI_BM_SLAVE:
+		same_host = true;
+		if (same_host)
+			err = __ppf_process_mbox_msg(hwdev, pf_idx, vf_id,
+						     mod, cmd, buf_in, in_size,
+						     buf_out, out_size);
+		else
+			sdk_warn(hwdev->dev_hdl, "Receive control message from BM master, don't support for now\n");
+
+		break;
+	default:
+		sdk_warn(hwdev->dev_hdl, "Don't support ppf mbox message\n");
+
+		break;
+	}
+
+	return err;
+}
+
+/* PPF mailbox callback for HINIC_MOD_COMM commands. */
+int comm_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+			  void *buf_in, u16 in_size, void *buf_out,
+			  u16 *out_size)
+{
+	struct hinic_hwdev *hwdev = handle;
+
+	return hinic_ppf_process_mbox_msg(hwdev, pf_idx, vf_id,
+					  HINIC_MOD_COMM, cmd, buf_in,
+					  in_size, buf_out, out_size);
+}
+
+/* PF-side stub for common messages arriving from the PPF: only logs,
+ * no command is currently supported on this path.
+ */
+void comm_ppf_to_pf_handler(void *handle, u8 cmd,
+			    void *buf_in, u16 in_size,
+			    void *buf_out, u16 *out_size)
+{
+	struct hinic_hwdev *dev = handle;
+
+	sdk_err(dev->dev_hdl, "pf receive ppf common mbox msg, don't supported for now\n");
+}
+
+/* PPF mailbox callback for HINIC_MOD_HILINK commands. */
+int hilink_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+			    void *buf_in, u16 in_size, void *buf_out,
+			    u16 *out_size)
+{
+	struct hinic_hwdev *hwdev = handle;
+
+	return hinic_ppf_process_mbox_msg(hwdev, pf_idx, vf_id,
+					  HINIC_MOD_HILINK, cmd, buf_in,
+					  in_size, buf_out, out_size);
+}
+
+/* PPF mailbox callback for HINIC_MOD_L2NIC commands. */
+int hinic_nic_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd,
+			       void *buf_in, u16 in_size, void *buf_out,
+			       u16 *out_size)
+{
+	struct hinic_hwdev *hwdev = handle;
+
+	return hinic_ppf_process_mbox_msg(hwdev, pf_idx, vf_id,
+					  HINIC_MOD_L2NIC, cmd, buf_in,
+					  in_size, buf_out, out_size);
+}
+
+/* PF-side stub for l2nic messages arriving from another PF's PPF:
+ * only logs, no command is currently supported on this path.
+ */
+void hinic_nic_ppf_to_pf_handler(void *handle, u8 cmd,
+				 void *buf_in, u16 in_size,
+				 void *buf_out, u16 *out_size)
+{
+	struct hinic_hwdev *dev = handle;
+
+	sdk_err(dev->dev_hdl, "ppf receive other pf l2nic mbox msg, don't supported for now\n");
+}
+
+/* Register (or unregister) this slave host PPF with the master host.
+ *
+ * On a successful register, the master's reply carries the current
+ * per-function nic enable bits, which seed the local func_nic_en
+ * bitmap. Only valid on a slave host.
+ */
+int hinic_register_slave_ppf(struct hinic_hwdev *hwdev, bool registered)
+{
+	struct register_slave_host host_info = {0};
+	u16 out_size = sizeof(host_info);
+	u8 cmd;
+	int err;
+
+	if (!IS_SLAVE_HOST(hwdev))
+		return -EINVAL;
+
+	cmd = registered ? HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER :
+		HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER;
+
+	host_info.host_id = hinic_pcie_itf_id(hwdev);
+	host_info.ppf_idx = hinic_ppf_idx(hwdev);
+
+	err = hinic_mbox_to_host_sync(hwdev, HINIC_MOD_SW_FUNC, cmd,
+				      &host_info, sizeof(host_info), &host_info,
+				      &out_size, 0);
+	if (err || !out_size || host_info.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to %s slave host, err: %d, out_size: 0x%x, status: 0x%x\n",
+			registered ? "register" : "unregister", err, out_size,
+			host_info.status);
+		return -EFAULT;
+	}
+	/* adopt the master's view of which functions have nic enabled */
+	bitmap_copy(hwdev->mhost_mgmt->func_nic_en,
+		    (ulong *)host_info.funcs_nic_en,
+		    HINIC_MAX_FUNCTIONS);
+	return 0;
+}
+
+/* Resolve the PCIe interface (host) id owning a global function id.
+ *
+ * @func_idx matches a PF when it equals the PF's own global id, or a
+ * VF when it falls inside [glb_pf_vf_offset + 1, glb_pf_vf_offset +
+ * max_vfs] of that PF. Returns -EFAULT when no PF covers it.
+ */
+static int get_host_id_by_func_id(struct hinic_hwdev *hwdev, u16 func_idx,
+				  u8 *host_id)
+{
+	struct hinic_hw_pf_infos *pf_infos;
+	u16 vf_first, vf_last;
+	int i;
+
+	if (!hwdev || !host_id || !hwdev->mhost_mgmt)
+		return -EINVAL;
+
+	pf_infos = &hwdev->mhost_mgmt->pf_infos;
+
+	for (i = 0; i < pf_infos->num_pfs; i++) {
+		vf_first = pf_infos->infos[i].glb_pf_vf_offset + 1;
+		vf_last = pf_infos->infos[i].glb_pf_vf_offset +
+			  pf_infos->infos[i].max_vfs;
+
+		if (func_idx == pf_infos->infos[i].glb_func_idx ||
+		    (func_idx >= vf_first && func_idx <= vf_last)) {
+			*host_id = pf_infos->infos[i].itf_idx;
+			return 0;
+		}
+	}
+
+	return -EFAULT;
+}
+
+/* Tell the slave host that @func_idx's nic was enabled/disabled.
+ *
+ * MBOX_ERRCODE_UNKNOWN_DES_FUNC is tolerated and treated as success:
+ * the slave host PPF is simply not initialized yet (e.g. PXE boot).
+ * Any other failure returns -EFAULT.
+ */
+int set_slave_func_nic_state(struct hinic_hwdev *hwdev, u16 func_idx, u8 en)
+{
+	struct hinic_slave_func_nic_state nic_state = {0};
+	u16 out_size = sizeof(nic_state);
+	int err;
+
+	nic_state.func_idx = func_idx;
+	nic_state.enable = en;
+
+	err = hinic_mbox_to_host_sync(hwdev, HINIC_MOD_SW_FUNC,
+				      HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE,
+				      &nic_state, sizeof(nic_state), &nic_state,
+				      &out_size, 0);
+	if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) {
+		sdk_warn(hwdev->dev_hdl, "Can not notify func %d nic state because slave host not initialized\n",
+			 func_idx);
+	} else if (err || !out_size || nic_state.status) {
+		sdk_err(hwdev->dev_hdl, "Failed to set slave host functions nic state, err: %d, out_size: 0x%x, status: 0x%x\n",
+			err, out_size, nic_state.status);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Master-host API: enable/disable nic for a slave host function.
+ *
+ * Updates the PPF's local func_nic_en bitmap first, then notifies the
+ * slave host if it is currently enabled. On any failure the bitmap
+ * bit is rolled back to its previous value. Only callable in master
+ * host mode; @hwdev may be any function, the PPF is resolved from it.
+ */
+int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state *state)
+{
+	struct hinic_hwdev *ppf_hwdev = hwdev;
+	struct hinic_multi_host_mgmt *mhost_mgmt;
+	u8 host_id = 0;
+	bool host_enable;
+	int err;
+	int old_state;
+
+	if (!hwdev || !state)
+		return -EINVAL;
+
+	if (hinic_func_type(hwdev) != TYPE_PPF)
+		ppf_hwdev = ((struct hinic_hwdev *)hwdev)->ppf_hwdev;
+
+	if (!ppf_hwdev || !IS_MASTER_HOST(ppf_hwdev))
+		return -EINVAL;
+
+	mhost_mgmt = ppf_hwdev->mhost_mgmt;
+	if (!mhost_mgmt || state->func_idx >= HINIC_MAX_FUNCTIONS)
+		return -EINVAL;
+
+	/* remember the previous bit so failures below can roll back */
+	old_state = test_bit(state->func_idx, mhost_mgmt->func_nic_en) ? 1 : 0;
+	if (state->state == HINIC_FUNC_NIC_DEL)
+		clear_bit(state->func_idx, mhost_mgmt->func_nic_en);
+	else if (state->state == HINIC_FUNC_NIC_ADD)
+		set_bit(state->func_idx, mhost_mgmt->func_nic_en);
+	else
+		return -EINVAL;
+
+	err = get_host_id_by_func_id(ppf_hwdev, state->func_idx, &host_id);
+	if (err) {
+		sdk_err(ppf_hwdev->dev_hdl, "Failed to get function %d host id, err: %d\n",
+			state->func_idx, err);
+		old_state ? set_bit(state->func_idx, mhost_mgmt->func_nic_en) :
+			clear_bit(state->func_idx, mhost_mgmt->func_nic_en);
+		return -EFAULT;
+	}
+
+	host_enable = hinic_get_slave_host_enable(hwdev, host_id);
+	sdk_info(ppf_hwdev->dev_hdl, "Set slave host %d(status: %d) func %d %s nic\n",
+		 host_id, host_enable,
+		 state->func_idx, state->state ? "enable" : "disable");
+
+	/* slave host down (e.g. not booted yet): local bitmap is enough */
+	if (!host_enable)
+		return 0;
+
+	/* notify slave host */
+	err = set_slave_func_nic_state(hwdev, state->func_idx, state->state);
+	if (err) {
+		old_state ? set_bit(state->func_idx, mhost_mgmt->func_nic_en) :
+			clear_bit(state->func_idx, mhost_mgmt->func_nic_en);
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_set_func_nic_state);
+
+/* Read a function's nic enable bit straight from the PPF's local
+ * func_nic_en bitmap (no mailbox round-trip).
+ */
+static int __get_func_nic_state_from_pf(struct hinic_hwdev *hwdev,
+					u16 glb_func_idx, u8 *en)
+{
+	struct hinic_hwdev *ppf_hwdev = hwdev;
+	struct hinic_multi_host_mgmt *mgmt;
+
+	if (hinic_func_type(hwdev) != TYPE_PPF)
+		ppf_hwdev = ((struct hinic_hwdev *)hwdev)->ppf_hwdev;
+
+	if (!ppf_hwdev || !ppf_hwdev->mhost_mgmt)
+		return -EFAULT;
+
+	mgmt = ppf_hwdev->mhost_mgmt;
+	*en = test_bit(glb_func_idx, mgmt->func_nic_en) ? 1 : 0;
+
+	sdk_info(ppf_hwdev->dev_hdl, "slave host func %d nic %d\n",
+		 glb_func_idx, *en);
+
+	return 0;
+}
+
+/* Query whether @glb_func_idx should bring up its nic.
+ *
+ * Non-slave hosts: always true, except VFs on an OVS-mode card.
+ * Slave host VF: ask the PF over the mailbox.
+ * Slave host PF/PPF in VM gateway mode: always true (PXE install).
+ * Other slave host PF/PPF: read the PPF's local bitmap directly.
+ */
+int hinic_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en)
+{
+	struct hinic_slave_func_nic_state nic_state = {0};
+	u16 out_size = sizeof(nic_state);
+	u8 nic_en;
+	int err;
+
+	if (!hwdev || !en)
+		return -EINVAL;
+	/*if card mode is OVS, VFs donot need attach_uld, so return false.*/
+	if (!IS_SLAVE_HOST((struct hinic_hwdev *)hwdev)) {
+		if (hinic_func_type(hwdev) == TYPE_VF &&
+		    hinic_support_ovs(hwdev, NULL)) {
+			*en = false;
+		} else {
+			*en = true;
+		}
+		return 0;
+	}
+
+	if (hinic_func_type(hwdev) == TYPE_VF) {
+		nic_state.func_idx = glb_func_idx;
+		err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_SW_FUNC,
+					     HINIC_SW_CMD_GET_SLAVE_FUNC_NIC_STATE,
+					     &nic_state, sizeof(nic_state),
+					     &nic_state, &out_size, 0);
+		if (err || !out_size || nic_state.status) {
+			sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Failed to get func %d nic state, err: %d, out_size: 0x%x, status: 0x%x\n",
+				glb_func_idx, err, out_size, nic_state.status);
+			return -EFAULT;
+		}
+
+		*en = !!nic_state.enable;
+
+		return 0;
+	}
+
+	/* pf in slave host should be probe in CHIP_MODE_VMGW
+	 * mode for pxe install
+	 */
+	if (IS_VM_SLAVE_HOST((struct hinic_hwdev *)hwdev)) {
+		*en = true;
+		return 0;
+	}
+
+	/* pf/ppf get function nic state in sdk diretly */
+	err = __get_func_nic_state_from_pf(hwdev, glb_func_idx, &nic_en);
+	if (err)
+		return err;
+
+	*en = !!nic_en;
+
+	return 0;
+}
+
+/* Initialize multi-host management on a PPF.
+ *
+ * Allocates the management context, caches the PF topology, registers
+ * the per-module PPF mailbox callbacks, and then performs the
+ * mode-specific handshake: a slave host enables its mailbox and
+ * registers with the master; a master host enables the master mailbox.
+ * No-op (returns 0) for non-multi-host devices or non-PPF functions.
+ */
+int hinic_multi_host_mgmt_init(struct hinic_hwdev *hwdev)
+{
+	int err;
+
+	if (!IS_MULTI_HOST(hwdev) || !HINIC_IS_PPF(hwdev))
+		return 0;
+
+	hwdev->mhost_mgmt = kzalloc(sizeof(*hwdev->mhost_mgmt), GFP_KERNEL);
+	if (!hwdev->mhost_mgmt)
+		return -ENOMEM;
+
+	/* NOTE(review): shost_ppf_idx/shost_host_idx are fixed at 0/2
+	 * here — presumably the platform's static topology; confirm.
+	 */
+	hwdev->mhost_mgmt->mhost_ppf_idx = get_master_host_ppf_idx(hwdev);
+	hwdev->mhost_mgmt->shost_ppf_idx = 0;
+	hwdev->mhost_mgmt->shost_host_idx = 2;
+
+	err = hinic_get_hw_pf_infos(hwdev, &hwdev->mhost_mgmt->pf_infos);
+	if (err)
+		goto out_free_mhost_mgmt;
+
+	hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_COMM,
+				   comm_ppf_mbox_handler);
+	hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+				   hinic_nic_ppf_mbox_handler);
+	hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_HILINK,
+				   hilink_ppf_mbox_handler);
+	hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC,
+				   sw_func_ppf_mbox_handler);
+
+	bitmap_zero(hwdev->mhost_mgmt->func_nic_en, HINIC_MAX_FUNCTIONS);
+
+	/* Slave host:
+	 * register slave host ppf functions
+	 * Get function's nic state
+	 */
+	if (IS_SLAVE_HOST(hwdev)) {
+		/* PXE don't support to receive mbox from master host */
+		set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), true);
+		if ((IS_VM_SLAVE_HOST(hwdev) &&
+		     hinic_get_master_host_mbox_enable(hwdev)) ||
+		     IS_BMGW_SLAVE_HOST(hwdev)) {
+			err = hinic_register_slave_ppf(hwdev, true);
+			if (err) {
+				set_slave_host_enable(hwdev,
+						      hinic_pcie_itf_id(hwdev),
+						      false);
+				goto out_free_mhost_mgmt;
+			}
+		}
+	} else {
+		/* slave host can send message to mgmt cpu after setup master
+		 * mbox
+		 */
+		set_master_host_mbox_enable(hwdev, true);
+	}
+
+	return 0;
+
+out_free_mhost_mgmt:
+	kfree(hwdev->mhost_mgmt);
+	hwdev->mhost_mgmt = NULL;
+
+	return err;
+}
+
+/* Tear down multi-host management: undo the mode-specific handshake,
+ * unregister the PPF mailbox callbacks and release the context.
+ * Mirrors hinic_multi_host_mgmt_init(); always returns 0.
+ */
+int hinic_multi_host_mgmt_free(struct hinic_hwdev *hwdev)
+{
+	if (!IS_MULTI_HOST(hwdev) || !HINIC_IS_PPF(hwdev))
+		return 0;
+
+	if (IS_SLAVE_HOST(hwdev)) {
+		hinic_register_slave_ppf(hwdev, false);
+
+		set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false);
+	} else {
+		set_master_host_mbox_enable(hwdev, false);
+	}
+
+	hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_COMM);
+	hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+	hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_HILINK);
+	hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC);
+
+	kfree(hwdev->mhost_mgmt);
+	hwdev->mhost_mgmt = NULL;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h
new file mode 100644
index 000000000000..f35b2a1f7989
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __HINIC_MULTI_HOST_MGMT_H_
+#define __HINIC_MULTI_HOST_MGMT_H_
+
+#define IS_BMGW_MASTER_HOST(hwdev) \
+ ((hwdev)->func_mode == FUNC_MOD_MULTI_BM_MASTER)
+#define IS_BMGW_SLAVE_HOST(hwdev) \
+ ((hwdev)->func_mode == FUNC_MOD_MULTI_BM_SLAVE)
+#define IS_VM_MASTER_HOST(hwdev) \
+ ((hwdev)->func_mode == FUNC_MOD_MULTI_VM_MASTER)
+#define IS_VM_SLAVE_HOST(hwdev) \
+ ((hwdev)->func_mode == FUNC_MOD_MULTI_VM_SLAVE)
+
+#define IS_MASTER_HOST(hwdev) \
+ (IS_BMGW_MASTER_HOST(hwdev) || IS_VM_MASTER_HOST(hwdev))
+
+#define IS_SLAVE_HOST(hwdev) \
+ (IS_BMGW_SLAVE_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev))
+
+#define IS_MULTI_HOST(hwdev) \
+ (IS_BMGW_MASTER_HOST(hwdev) || IS_BMGW_SLAVE_HOST(hwdev) || \
+ IS_VM_MASTER_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev))
+
+#define NEED_MBOX_FORWARD(hwdev) IS_BMGW_SLAVE_HOST(hwdev)
+
+/* Per-PPF multi-host management context. */
+struct hinic_multi_host_mgmt {
+	struct hinic_hwdev *hwdev;
+
+	/* slave host registered */
+	bool shost_registered;
+	u8 shost_host_idx;	/* slave host interface index */
+	u8 shost_ppf_idx;	/* slave host PPF function index */
+
+	/* slave host functions support nic enable */
+	DECLARE_BITMAP(func_nic_en, HINIC_MAX_FUNCTIONS);
+
+	u8 mhost_ppf_idx;	/* master host PPF function index */
+
+	struct hinic_hw_pf_infos pf_infos;	/* cached PF topology */
+};
+
+/* Header prepended to messages forwarded between hosts; the payload
+ * for the destination function follows immediately after it.
+ */
+struct hinic_host_fwd_head {
+	unsigned short dst_glb_func_idx;	/* final receiver */
+	unsigned char dst_itf_idx;		/* receiver's host interface */
+	unsigned char mod;			/* hinic_mod_type of the payload */
+
+	unsigned char cmd;
+	unsigned char rsv[3];
+};
+
+int hinic_multi_host_mgmt_init(struct hinic_hwdev *hwdev);
+int hinic_multi_host_mgmt_free(struct hinic_hwdev *hwdev);
+int hinic_mbox_to_host_no_ack(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size);
+
+/* Mailbox body for slave host PPF (un)registration; the reply carries
+ * the master's per-function nic enable bitmap in funcs_nic_en.
+ */
+struct register_slave_host {
+	u8 status;	/* reply status, 0 on success */
+	u8 version;
+	u8 rsvd[6];
+
+	u8 host_id;	/* sender's PCIe interface id */
+	u8 ppf_idx;	/* sender's PPF function index */
+	u8 rsvd2[6];
+
+	/* for max 512 functions */
+	u64 funcs_nic_en[8];
+
+	u64 rsvd3[8];
+};
+
+/* Mailbox body for getting/setting one function's nic enable state. */
+struct hinic_slave_func_nic_state {
+	u8 status;	/* reply status, 0 on success */
+	u8 version;
+	u8 rsvd[6];
+
+	u16 func_idx;	/* global function index */
+	u8 enable;	/* 1 = nic enabled, 0 = disabled */
+	u8 rsvd1;
+
+	u32 rsvd2[2];
+};
+
+void set_master_host_mbox_enable(struct hinic_hwdev *hwdev, bool enable);
+void set_slave_host_enable(struct hinic_hwdev *hwdev, u8 host_id, bool enable);
+void set_func_host_mode(struct hinic_hwdev *hwdev, enum hinic_func_mode mode);
+int rectify_host_mode(struct hinic_hwdev *hwdev);
+void detect_host_mode_pre(struct hinic_hwdev *hwdev);
+
+int sw_func_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic.h b/drivers/net/ethernet/huawei/hinic/hinic_nic.h
new file mode 100644
index 000000000000..f2fb93773232
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_NIC_H_
+#define HINIC_NIC_H_
+
+#include "hinic_wq.h"
+
+#define SET_VPORT_MBOX_TIMEOUT (30 * 1000)
+#define SET_VPORT_MGMT_TIMEOUT (25 * 1000)
+/* Send queue: work queue plus doorbell/consumer-index bookkeeping. */
+struct hinic_sq {
+	struct hinic_wq *wq;
+
+	u16 q_id;
+
+	u8 owner;	/* current owner bit for wqe validity */
+
+	void *cons_idx_addr;	/* where hw reports the consumer index */
+
+	u8 __iomem *db_addr;	/* doorbell register for this queue */
+	u16 msix_entry_idx;
+};
+
+/* Receive queue: work queue plus producer-index and cqe DMA state. */
+struct hinic_rq {
+	struct hinic_wq *wq;
+
+	u16 *pi_virt_addr;	/* producer index, also visible to hw */
+	dma_addr_t pi_dma_addr;
+
+	u16 q_id;
+
+	u32 irq_id;
+	u16 msix_entry_idx;
+
+	dma_addr_t cqe_dma_addr;
+};
+
+/* One queue pair = one send queue + one receive queue. */
+struct hinic_qp {
+	struct hinic_sq sq;
+	struct hinic_rq rq;
+};
+
+/* PF-side per-VF administrative state. */
+struct vf_data_storage {
+	u8 vf_mac_addr[ETH_ALEN];
+	bool registered;	/* VF driver has registered with the PF */
+	bool pf_set_mac;	/* PF administratively fixed the VF MAC */
+	u16 pf_vlan;
+	u8 pf_qos;
+	u32 max_rate;
+	u32 min_rate;
+
+	bool link_forced;
+	bool link_up;		/* only valid if VF link is forced */
+	bool spoofchk;
+	bool trust;
+};
+
+/* Cached nic configuration for a function. */
+struct hinic_nic_cfg {
+	struct semaphore cfg_lock;	/* serializes config changes */
+
+	/* Valid when pfc is disable */
+	bool pause_set;
+	struct nic_pause_config nic_pause;
+
+	u8 pfc_en;
+	u8 pfc_bitmap;
+
+	struct nic_port_info port_info;
+
+	/* percentage of pf link bandwidth */
+	u32 pf_bw_limit;
+};
+
+/* Top-level nic I/O context: queue pairs, VF table and config. */
+struct hinic_nic_io {
+	struct hinic_hwdev *hwdev;
+
+	u16 global_qpn;		/* base global queue pair number */
+	u8 link_status;
+
+	struct hinic_wqs wqs;
+
+	struct hinic_wq *sq_wq;
+	struct hinic_wq *rq_wq;
+
+	u16 max_qps;
+	u16 num_qps;
+	u16 sq_depth;
+	u16 rq_depth;
+	struct hinic_qp *qps;
+
+	void *ci_vaddr_base;	/* consumer index area (virt) */
+	dma_addr_t ci_dma_base;	/* consumer index area (DMA) */
+
+	u16 max_vfs;
+	struct vf_data_storage *vf_infos;	/* array of max_vfs entries */
+
+	struct hinic_dcb_state dcb_state;
+
+	struct hinic_nic_cfg nic_cfg;
+	u16 rx_buff_len;
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c
new file mode 100644
index 000000000000..f18e8f1366f0
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c
@@ -0,0 +1,3995 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_mbox.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_hwif.h"
+
+static unsigned char set_vf_link_state;
+module_param(set_vf_link_state, byte, 0444);
+MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto, 1 represents link always up, 2 represents link always down. - default is 0.");
+
+#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size)\
+ hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, cmd, \
+ buf_in, in_size, \
+ buf_out, out_size, 0)
+
+#define l2nic_msg_to_mgmt_async(hwdev, cmd, buf_in, in_size) \
+ hinic_msg_to_mgmt_async(hwdev, HINIC_MOD_L2NIC, cmd, buf_in, in_size)
+
+#define CPATH_FUNC_ID_VALID_LIMIT 2
+#define CHECK_IPSU_15BIT 0X8000
+
+static int hinic_set_rx_lro_timer(void *hwdev, u32 timer_value);
+
+/* Validator for HINIC_PORT_CMD_INIT_FUNC from a VF: the embedded
+ * function id must match the sender and the rx wqe buffer size must
+ * be non-zero.
+ */
+static bool check_func_table(struct hinic_hwdev *hwdev, u16 func_idx,
+			     void *buf_in, u16 in_size)
+{
+	struct hinic_function_table *function_table = buf_in;
+
+	if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+		return false;
+
+	return function_table->rx_wqe_buf_size != 0;
+}
+
+/* Validator for HINIC_PORT_CMD_SET_RX_CSUM from a VF: the function id
+ * must match the sender and only enabling rx checksum is permitted.
+ */
+static bool check_rxcsum_setting(struct hinic_hwdev *hwdev, u16 func_idx,
+				 void *buf_in, u16 in_size)
+{
+	struct hinic_checksum_offload *rx_csum_cfg = buf_in;
+
+	if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
+		return false;
+
+	return rx_csum_cfg->rx_csum_offload == HINIC_RX_CSUM_OFFLOAD_EN;
+}
+
+/* Validator for HINIC_PORT_CMD_FORCE_PKT_DROP from a VF: the request
+ * must target the physical port this device owns.
+ */
+static bool check_force_pkt_drop(struct hinic_hwdev *hwdev, u16 func_idx,
+				 void *buf_in, u16 in_size)
+{
+	struct hinic_force_pkt_drop *pkt_drop = buf_in;
+
+	return pkt_drop->port == hinic_physical_port_id(hwdev);
+}
+
+/* Whitelist of L2NIC port commands a VF may issue through the PF
+ * mailbox. Each entry pairs a command with an optional validator for
+ * the VF-supplied input buffer; NULL means the command is allowed
+ * without further input checks.
+ */
+struct vf_cmd_check_handle nic_cmd_support_vf[] = {
+	{HINIC_PORT_CMD_VF_REGISTER, NULL},
+	{HINIC_PORT_CMD_VF_UNREGISTER, NULL},
+
+	{HINIC_PORT_CMD_CHANGE_MTU, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_ADD_VLAN, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_DEL_VLAN, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_SET_MAC, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_GET_MAC, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_DEL_MAC, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_SET_RX_MODE, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_GET_PAUSE_INFO, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_GET_LINK_STATE, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_SET_LRO, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_SET_RX_CSUM, check_rxcsum_setting},
+	{HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_GET_VPORT_STAT, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_CLEAN_VPORT_STAT, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
+	 hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_SET_RSS_TEMPLATE_INDIR_TBL,
+	 hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_GET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_SET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_RSS_TEMP_MGR, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_RSS_CFG, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_INIT_FUNC, check_func_table},
+	{HINIC_PORT_CMD_SET_LLI_PRI, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_GET_MGMT_VERSION, NULL},
+	{HINIC_PORT_CMD_GET_BOOT_VERSION, NULL},
+	{HINIC_PORT_CMD_GET_MICROCODE_VERSION, NULL},
+
+	{HINIC_PORT_CMD_GET_VPORT_ENABLE, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_SET_VPORT_ENABLE, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_GET_LRO, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_GET_GLOBAL_QPN, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_SET_TSO, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_SET_RQ_IQ_MAP, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_LINK_STATUS_REPORT, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_GET_PORT_INFO, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_SET_IPSU_MAC, hinic_mbox_check_func_id_10B},
+	{HINIC_PORT_CMD_GET_IPSU_MAC, hinic_mbox_check_func_id_10B},
+
+	{HINIC_PORT_CMD_GET_LINK_MODE, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_CLEAR_SQ_RES, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_SET_SUPER_CQE, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_GET_VF_COS, NULL},
+	{HINIC_PORT_CMD_SET_VHD_CFG, hinic_mbox_check_func_id_8B},
+
+	{HINIC_PORT_CMD_SET_VLAN_FILTER, hinic_mbox_check_func_id_8B},
+	{HINIC_PORT_CMD_FORCE_PKT_DROP, check_force_pkt_drop},
+};
+
+/* Program this function's table in firmware: default max MTU (0x3FFF)
+ * and the rx wqe buffer size the hardware will use for receives.
+ */
+int hinic_init_function_table(void *hwdev, u16 rx_buf_sz)
+{
+	struct hinic_function_table function_table = {0};
+	u16 out_size = sizeof(function_table);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &function_table.func_id);
+	if (err)
+		return err;
+
+	function_table.version = HINIC_CMD_VER_FUNC_ID;
+	function_table.mtu = 0x3FFF; /* default, max mtu */
+	function_table.rx_wqe_buf_size = rx_buf_sz;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+				     HINIC_PORT_CMD_INIT_FUNC,
+				     &function_table, sizeof(function_table),
+				     &function_table, &out_size, 0);
+	if (err || function_table.status || !out_size) {
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to init func table, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, function_table.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Query this function's base global queue pair number from firmware. */
+int hinic_get_base_qpn(void *hwdev, u16 *global_qpn)
+{
+	struct hinic_cmd_qpn qpn_info = {0};
+	u16 out_size = sizeof(qpn_info);
+	int err;
+
+	if (!hwdev || !global_qpn)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &qpn_info.func_id);
+	if (err)
+		return err;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+				     HINIC_PORT_CMD_GET_GLOBAL_QPN,
+				     &qpn_info, sizeof(qpn_info), &qpn_info,
+				     &out_size, 0);
+	if (err || !out_size || qpn_info.status) {
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, qpn_info.status, out_size);
+		return -EINVAL;
+	}
+
+	*global_qpn = qpn_info.base_qpn;
+
+	return 0;
+}
+
+/* Read the firmware's function capability flags into
+ * hwdev->fw_support_func_flag. Old firmware that does not implement
+ * the command is treated as "no capabilities" (flag = 0), not as an
+ * error.
+ */
+int hinic_get_fw_support_func(void *hwdev)
+{
+	struct fw_support_func support_flag = {0};
+	struct hinic_hwdev *dev = hwdev;
+	u16 out_size = sizeof(support_flag);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+				     HINIC_PORT_CMD_GET_FW_SUPPORT_FLAG,
+				     &support_flag, sizeof(support_flag),
+				     &support_flag, &out_size, 0);
+	if (support_flag.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+		nic_info(dev->dev_hdl, "Current firmware doesn't support to get function capability\n");
+		support_flag.flag = 0;
+	} else if (support_flag.status || err || !out_size) {
+		nic_err(dev->dev_hdl, "Failed to get function capability, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, support_flag.status, out_size);
+		return -EFAULT;
+	}
+
+	dev->fw_support_func_flag = support_flag.flag;
+
+	return 0;
+}
+
+#define HINIC_ADD_VLAN_IN_MAC 0x8000
+#define HINIC_VLAN_ID_MASK 0x7FFF
+#define PF_SET_VF_MAC(hwdev, status) \
+ (HINIC_FUNC_TYPE(hwdev) == TYPE_VF && \
+ (status) == HINIC_PF_SET_VF_ALREADY)
+
+/* Decide whether a MAC set/update reply status is a real failure.
+ *
+ * HINIC_MGMT_STATUS_EXIST (duplicate MAC) is tolerated unless bit15
+ * of @vlan_id (the add-vlan-in-mac flag) is set. A VF whose MAC is
+ * administratively fixed by the PF (HINIC_PF_SET_VF_ALREADY) is
+ * always treated as success.
+ */
+static int hinic_check_mac_status(struct hinic_hwdev *hwdev, u8 status,
+				  u16 vlan_id)
+{
+	bool failed;
+
+	failed = (status && status != HINIC_MGMT_STATUS_EXIST) ||
+		 ((vlan_id & CHECK_IPSU_15BIT) &&
+		  status == HINIC_MGMT_STATUS_EXIST);
+	if (!failed)
+		return 0;
+
+	return PF_SET_VF_MAC(hwdev, status) ? 0 : -EINVAL;
+}
+
+/* Add a MAC (optionally bound to a vlan via bit15 in @vlan_id) for
+ * @func_id. Returns HINIC_PF_SET_VF_ALREADY when a VF's request is
+ * overridden by a PF-assigned MAC; a duplicate MAC is reported as
+ * success.
+ */
+int hinic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_port_mac_set mac_info = {0};
+	u16 out_size = sizeof(mac_info);
+	int err;
+
+	if (!hwdev || !mac_addr)
+		return -EINVAL;
+
+	if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
+		nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n",
+			(vlan_id & HINIC_VLAN_ID_MASK));
+		return -EINVAL;
+	}
+
+	mac_info.func_id = func_id;
+	mac_info.vlan_id = vlan_id;
+	memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,
+				     sizeof(mac_info), &mac_info, &out_size);
+	if (err || !out_size ||
+	    hinic_check_mac_status(hwdev, mac_info.status, mac_info.vlan_id)) {
+		nic_err(nic_hwdev->dev_hdl,
+			"Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, mac_info.status, out_size);
+		return -EIO;
+	}
+
+	if (PF_SET_VF_MAC(nic_hwdev, mac_info.status)) {
+		nic_warn(nic_hwdev->dev_hdl, "PF has already set VF mac, Ignore set operation\n");
+		return HINIC_PF_SET_VF_ALREADY;
+	}
+
+	if (mac_info.status == HINIC_MGMT_STATUS_EXIST) {
+		nic_warn(nic_hwdev->dev_hdl, "MAC is repeated. Ignore update operation\n");
+		return 0;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_set_mac);
+
+/* Delete a MAC (optionally vlan-bound via bit15 in @vlan_id) for
+ * @func_id. Returns HINIC_PF_SET_VF_ALREADY when the MAC is fixed by
+ * the PF and the VF's delete request is ignored.
+ */
+int hinic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_port_mac_set mac_info = {0};
+	u16 out_size = sizeof(mac_info);
+	int err;
+
+	if (!hwdev || !mac_addr)
+		return -EINVAL;
+
+	if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
+		nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n",
+			(vlan_id & HINIC_VLAN_ID_MASK));
+		return -EINVAL;
+	}
+
+	mac_info.func_id = func_id;
+	mac_info.vlan_id = vlan_id;
+	memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_MAC, &mac_info,
+				     sizeof(mac_info), &mac_info, &out_size);
+	if (err || !out_size ||
+	    (mac_info.status && !PF_SET_VF_MAC(nic_hwdev, mac_info.status))) {
+		nic_err(nic_hwdev->dev_hdl,
+			"Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, mac_info.status, out_size);
+		return -EIO;
+	}
+	if (PF_SET_VF_MAC(nic_hwdev, mac_info.status)) {
+		nic_warn(nic_hwdev->dev_hdl, "PF has already set VF mac, Ignore delete operation\n");
+		return HINIC_PF_SET_VF_ALREADY;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_del_mac);
+
+/* Replace @old_mac with @new_mac for @func_id in one firmware command.
+ * Same status handling as hinic_set_mac(): PF-fixed VF MACs return
+ * HINIC_PF_SET_VF_ALREADY, duplicates are reported as success.
+ */
+int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id,
+		     u16 func_id)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_port_mac_update mac_info = {0};
+	u16 out_size = sizeof(mac_info);
+	int err;
+
+	if (!hwdev || !old_mac || !new_mac)
+		return -EINVAL;
+
+	if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
+		nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n",
+			(vlan_id & HINIC_VLAN_ID_MASK));
+		return -EINVAL;
+	}
+
+	mac_info.func_id = func_id;
+	mac_info.vlan_id = vlan_id;
+	memcpy(mac_info.old_mac, old_mac, ETH_ALEN);
+	memcpy(mac_info.new_mac, new_mac, ETH_ALEN);
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UPDATE_MAC,
+				     &mac_info, sizeof(mac_info),
+				     &mac_info, &out_size);
+	if (err || !out_size ||
+	    hinic_check_mac_status(hwdev, mac_info.status, mac_info.vlan_id)) {
+		nic_err(nic_hwdev->dev_hdl,
+			"Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, mac_info.status, out_size);
+		return -EIO;
+	}
+
+	if (PF_SET_VF_MAC(nic_hwdev, mac_info.status)) {
+		nic_warn(nic_hwdev->dev_hdl, "PF has already set VF MAC. Ignore update operation\n");
+		return HINIC_PF_SET_VF_ALREADY;
+	}
+
+	if (mac_info.status == HINIC_MGMT_STATUS_EXIST) {
+		nic_warn(nic_hwdev->dev_hdl, "MAC is repeated. Ignore update operation\n");
+		return 0;
+	}
+
+	return 0;
+}
+
+/* Rebind a PF-assigned VF MAC from @old_vlan to @new_vlan.
+ *
+ * No-op when the PF never set the VF's MAC or the firmware lacks MAC
+ * reuse support. Deletes the MAC under the old vlan, re-adds it under
+ * the new one, and on failure restores the old binding so the VF is
+ * never left without its MAC.
+ */
+int hinic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct vf_data_storage *vf_info;
+	u16 func_id, vlan_id;
+	int err;
+
+	if (!hwdev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID)
+		return -EINVAL;
+
+	vf_info = dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+	if (!vf_info->pf_set_mac)
+		return 0;
+
+	if (!FW_SUPPORT_MAC_REUSE_FUNC(dev)) {
+		nic_info(dev->dev_hdl, "Current firmware doesn't support mac reuse\n");
+		return 0;
+	}
+
+	func_id = hinic_glb_pf_vf_offset(dev) + (u16)vf_id;
+	vlan_id = old_vlan;
+	/* non-zero vlan needs the add-vlan-in-mac flag (bit15) set */
+	if (vlan_id)
+		vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+	err = hinic_del_mac(dev, vf_info->vf_mac_addr, vlan_id,
+			    func_id);
+	if (err) {
+		nic_err(dev->dev_hdl, "Failed to delete VF %d MAC %pM vlan %d\n",
+			HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, vlan_id);
+		return err;
+	}
+
+	vlan_id = new_vlan;
+	if (vlan_id)
+		vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+	err = hinic_set_mac(dev, vf_info->vf_mac_addr, vlan_id,
+			    func_id);
+	if (err) {
+		nic_err(dev->dev_hdl, "Failed to add VF %d MAC %pM vlan %d\n",
+			HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, vlan_id);
+		goto out;
+	}
+
+	return 0;
+
+out:
+	/* best-effort restore of the old vlan binding */
+	vlan_id = old_vlan;
+	if (vlan_id)
+		vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+	hinic_set_mac(dev, vf_info->vf_mac_addr, vlan_id,
+		      func_id);
+
+	return err;
+}
+
+/* Query the function's default MAC from management firmware and install it
+ * as @netdev's hardware address. Returns 0 on success, negative errno on
+ * failure.
+ */
+int hinic_get_default_mac(void *hwdev, struct net_device *netdev)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_mac_set mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !netdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &mac_info.func_id);
+ if (err)
+ return err;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MAC,
+ &mac_info, sizeof(mac_info),
+ &mac_info, &out_size);
+ if (err || !out_size || mac_info.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+
+ eth_hw_addr_set(netdev, mac_info.mac);
+ return 0;
+}
+
+/* Program the port MTU in firmware. @new_mtu must lie within
+ * [HINIC_MIN_MTU_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE].
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_port_mtu(void *hwdev, u32 new_mtu)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_mtu mtu_info = {0};
+ u16 out_size = sizeof(mtu_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (new_mtu < HINIC_MIN_MTU_SIZE) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Invalid mtu size, mtu size < %dbytes\n",
+ HINIC_MIN_MTU_SIZE);
+ return -EINVAL;
+ }
+
+ if (new_mtu > HINIC_MAX_JUMBO_FRAME_SIZE) {
+ nic_err(nic_hwdev->dev_hdl, "Invalid mtu size, mtu size > %dbytes\n",
+ HINIC_MAX_JUMBO_FRAME_SIZE);
+ return -EINVAL;
+ }
+
+ err = hinic_global_func_id_get(hwdev, &mtu_info.func_id);
+ if (err)
+ return err;
+
+ mtu_info.mtu = new_mtu;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CHANGE_MTU,
+ &mtu_info, sizeof(mtu_info),
+ &mtu_info, &out_size);
+ if (err || !out_size || mtu_info.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mtu_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* HiOVS: configure the control-path VLAN for @pf_id via the OVS management
+ * module. Returns 0 on success, -EINVAL on bad arguments, -EFAULT on
+ * command failure.
+ */
+int hinic_hiovs_set_cpath_vlan(void *hwdev, u16 vlan_id, u16 pf_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct cmd_cpath_vlan cpath_vlan_info = {0};
+ u16 out_size = sizeof(cpath_vlan_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ cpath_vlan_info.pf_id = pf_id;
+ cpath_vlan_info.vlan_id = vlan_id;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_OVS, OVS_SET_CPATH_VLAN,
+ &cpath_vlan_info, sizeof(cpath_vlan_info),
+ &cpath_vlan_info, &out_size, 0);
+
+ if (err || !out_size || cpath_vlan_info.status) {
+ /* drop pointless '0' flag: "%0x" without width is a no-op */
+ sdk_err(nic_hwdev->dev_hdl, "Failed to set cpath vlan, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, cpath_vlan_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* HiOVS: remove the control-path VLAN for @pf_id via the OVS management
+ * module. Returns 0 on success, -EINVAL on bad arguments, -EFAULT on
+ * command failure.
+ */
+int hinic_hiovs_del_cpath_vlan(void *hwdev, u16 vlan_id, u16 pf_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct cmd_cpath_vlan cpath_vlan_info = {0};
+ u16 out_size = sizeof(cpath_vlan_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ cpath_vlan_info.pf_id = pf_id;
+ cpath_vlan_info.vlan_id = vlan_id;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_OVS, OVS_DEL_CPATH_VLAN,
+ &cpath_vlan_info, sizeof(cpath_vlan_info),
+ &cpath_vlan_info, &out_size, 0);
+
+ if (err || !out_size || cpath_vlan_info.status) {
+ /* fix typo "delte" and pointless '0' flag in format */
+ sdk_err(nic_hwdev->dev_hdl, "Failed to delete cpath vlan, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, cpath_vlan_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Enable (@en != 0) or disable net-queue mode for this function.
+ * Returns 0 on success, HINIC_MGMT_CMD_UNSUPPORTED when the firmware does
+ * not implement the command, negative errno on other failures.
+ */
+int hinic_enable_netq(void *hwdev, u8 en)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_netq_cfg_msg netq_cfg = {0};
+ u16 out_size = sizeof(netq_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &netq_cfg.func_id);
+ if (err)
+ return err;
+
+ netq_cfg.netq_en = en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_NETQ,
+ &netq_cfg, sizeof(netq_cfg),
+ &netq_cfg, &out_size);
+ /* UNSUPPORTED is checked first so old firmware degrades to a warning */
+ if (netq_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ nic_warn(nic_hwdev->dev_hdl, "Not support enable netq\n");
+ } else if (err || !out_size || netq_cfg.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to enable netq, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, netq_cfg.status, out_size);
+ }
+
+ return err;
+}
+
+/* Install an RX-queue steering filter (plain MAC or MAC+VXLAN inner match)
+ * described by @filter_info. Returns 0 on success,
+ * HINIC_MGMT_CMD_UNSUPPORTED when firmware lacks the feature, negative
+ * errno otherwise.
+ */
+int hinic_add_hw_rqfilter(void *hwdev, struct hinic_rq_filter_info *filter_info)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rq_filter_msg filter_msg = {0};
+ u16 out_size = sizeof(filter_msg);
+ int err;
+
+ if (!hwdev || !filter_info)
+ return -EINVAL;
+
+ switch (filter_info->filter_type) {
+ case HINIC_RQ_FILTER_TYPE_MAC_ONLY:
+ memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+ break;
+ case HINIC_RQ_FILTER_TYPE_VXLAN:
+ memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+ memcpy(filter_msg.vxlan.inner_mac,
+ filter_info->vxlan.inner_mac, ETH_ALEN);
+ filter_msg.vxlan.vni = filter_info->vxlan.vni;
+ break;
+ default:
+ nic_warn(nic_hwdev->dev_hdl, "No support filter type: 0x%x\n",
+ filter_info->filter_type);
+ return -EINVAL;
+ }
+
+ err = hinic_global_func_id_get(hwdev, &filter_msg.func_id);
+ if (err)
+ return err;
+
+ filter_msg.filter_type = filter_info->filter_type;
+ filter_msg.qid = filter_info->qid;
+ filter_msg.qflag = filter_info->qflag;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_ADD_RQ_FILTER,
+ &filter_msg, sizeof(filter_msg),
+ &filter_msg, &out_size);
+ if (filter_msg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ nic_warn(nic_hwdev->dev_hdl, "Not support add rxq filter\n");
+ } else if (err || !out_size || filter_msg.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to add RX qfilter, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, filter_msg.status, out_size);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+/* Remove a previously installed RX-queue steering filter matching
+ * @filter_info. Returns 0 on success, HINIC_MGMT_CMD_UNSUPPORTED when
+ * firmware lacks the feature, negative errno otherwise.
+ */
+int hinic_del_hw_rqfilter(void *hwdev, struct hinic_rq_filter_info *filter_info)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rq_filter_msg filter_msg = {0};
+ u16 out_size = sizeof(filter_msg);
+ int err;
+
+ if (!hwdev || !filter_info)
+ return -EINVAL;
+
+ switch (filter_info->filter_type) {
+ case HINIC_RQ_FILTER_TYPE_MAC_ONLY:
+ memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+ break;
+ case HINIC_RQ_FILTER_TYPE_VXLAN:
+ memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN);
+ memcpy(filter_msg.vxlan.inner_mac,
+ filter_info->vxlan.inner_mac, ETH_ALEN);
+ filter_msg.vxlan.vni = filter_info->vxlan.vni;
+ break;
+ default:
+ nic_warn(nic_hwdev->dev_hdl, "No support filter type: 0x%x\n",
+ filter_info->filter_type);
+ return -EINVAL;
+ }
+
+ err = hinic_global_func_id_get(hwdev, &filter_msg.func_id);
+ if (err)
+ return err;
+
+ filter_msg.filter_type = filter_info->filter_type;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_RQ_FILTER,
+ &filter_msg, sizeof(filter_msg),
+ &filter_msg, &out_size);
+ if (filter_msg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ nic_warn(nic_hwdev->dev_hdl, "Not support del rxq filter\n");
+ } else if (err || !out_size || filter_msg.status) {
+ /* fix typo: "delte" -> "delete" */
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to delete RX qfilter, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, filter_msg.status, out_size);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+/* Add a VLAN filter entry for @func_id via the L2NIC management channel.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vlan_config vlan_info = {0};
+ u16 out_size = sizeof(vlan_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vlan_info.func_id = func_id;
+ vlan_info.vlan_id = vlan_id;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_ADD_VLAN,
+ &vlan_info, sizeof(vlan_info),
+ &vlan_info, &out_size);
+ if (err || !out_size || vlan_info.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to add vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Remove a VLAN filter entry for @func_id via the L2NIC management channel.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vlan_config vlan_info = {0};
+ u16 out_size = sizeof(vlan_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vlan_info.func_id = func_id;
+ vlan_info.vlan_id = vlan_id;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_VLAN,
+ &vlan_info, sizeof(vlan_info),
+ &vlan_info, &out_size);
+ if (err || !out_size || vlan_info.status) {
+ /* fix typo: "delte" -> "delete" */
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to delete vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Configure the hardware VLAN filter control word.
+ * NOTE(review): "fliter" is a typo but is part of the exported name used by
+ * callers, so it is preserved.
+ * Returns 0 on success, HINIC_MGMT_CMD_UNSUPPORTED when the firmware (or a
+ * VF's PF) does not implement the command, -EINVAL on other failures.
+ */
+int hinic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vlan_filter vlan_filter = {0};
+ u16 out_size = sizeof(vlan_filter);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &vlan_filter.func_id);
+ if (err)
+ return err;
+ vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER,
+ &vlan_filter, sizeof(vlan_filter),
+ &vlan_filter, &out_size);
+ if (vlan_filter.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+ HINIC_IS_VF(nic_hwdev)) {
+ /* VF mailbox rejection is treated as "not supported" */
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || vlan_filter.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_filter.status, out_size);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/* Fetch port attributes (autoneg capability/state, duplex, port type,
+ * speed) from firmware into @port_info.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_info port_msg = {0};
+ u16 out_size = sizeof(port_msg);
+ int err;
+
+ if (!hwdev || !port_info)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &port_msg.func_id);
+ if (err)
+ return err;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_INFO,
+ &port_msg, sizeof(port_msg),
+ &port_msg, &out_size);
+ if (err || !out_size || port_msg.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_msg.status, out_size);
+ return -EINVAL;
+ }
+
+ port_info->autoneg_cap = port_msg.autoneg_cap;
+ port_info->autoneg_state = port_msg.autoneg_state;
+ port_info->duplex = port_msg.duplex;
+ port_info->port_type = port_msg.port_type;
+ port_info->speed = port_msg.speed;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_port_info);
+
+/* Enable or disable link autonegotiation for this function.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_autoneg(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_set_autoneg_cmd autoneg = {0};
+ u16 out_size = sizeof(autoneg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &autoneg.func_id);
+ if (err)
+ return err;
+
+ autoneg.enable = enable;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_AUTONEG,
+ &autoneg, sizeof(autoneg),
+ &autoneg, &out_size);
+ if (err || !out_size || autoneg.status) {
+ nic_err(dev->dev_hdl, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n",
+ enable ? "enable" : "disable", err, autoneg.status,
+ out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Force the port to renegotiate by driving the link down and back up.
+ * Returns 0 on success, -EINVAL on NULL @hwdev, -EFAULT on failure.
+ */
+int hinic_force_port_relink(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ int err;
+
+ /* Guard before use: on a NULL hwdev the callee fails and the
+ * nic_err() below would dereference dev->dev_hdl.
+ */
+ if (!hwdev)
+ return -EINVAL;
+
+ /* Force port link down and link up */
+ err = hinic_set_port_link_status(hwdev, false);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to set port link down\n");
+ return -EFAULT;
+ }
+
+ err = hinic_set_port_link_status(hwdev, true);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to set port link up\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Read the supported and advertised link-mode bitmaps from firmware.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_get_link_mode(void *hwdev, enum hinic_link_mode *supported,
+ enum hinic_link_mode *advertised)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_link_mode_cmd link_mode = {0};
+ u16 out_size = sizeof(link_mode);
+ int err;
+
+ if (!hwdev || !supported || !advertised)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &link_mode.func_id);
+ if (err)
+ return err;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
+ &link_mode, sizeof(link_mode),
+ &link_mode, &out_size);
+ if (err || !out_size || link_mode.status) {
+ nic_err(dev->dev_hdl,
+ "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_mode.status, out_size);
+ return -EINVAL;
+ }
+
+ *supported = link_mode.supported;
+ *advertised = link_mode.advertised;
+
+ return 0;
+}
+
+/* Administratively drive the port link up (@enable) or down.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_port_link_status(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_set_link_status link_status = {0};
+ u16 out_size = sizeof(link_status);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &link_status.func_id);
+ if (err)
+ return err;
+
+ link_status.enable = enable;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_LINK_STATUS,
+ &link_status, sizeof(link_status),
+ &link_status, &out_size);
+ if (err || !out_size || link_status.status) {
+ nic_err(dev->dev_hdl, "Failed to %s port link status, err: %d, status: 0x%x, out size: 0x%x\n",
+ enable ? "Enable" : "Disable", err, link_status.status,
+ out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Program a fixed port speed (one of enum nic_speed_level) in firmware.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_speed(void *hwdev, enum nic_speed_level speed)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_speed_cmd speed_info = {0};
+ u16 out_size = sizeof(speed_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &speed_info.func_id);
+ if (err)
+ return err;
+
+ speed_info.speed = speed;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_SPEED,
+ &speed_info, sizeof(speed_info),
+ &speed_info, &out_size);
+ if (err || !out_size || speed_info.status) {
+ nic_err(dev->dev_hdl,
+ "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, speed_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Query the current port speed from firmware into *@speed.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_get_speed(void *hwdev, enum nic_speed_level *speed)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_speed_cmd speed_info = {0};
+ u16 out_size = sizeof(speed_info);
+ int err;
+
+ if (!hwdev || !speed)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &speed_info.func_id);
+ if (err)
+ return err;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_SPEED,
+ &speed_info, sizeof(speed_info),
+ &speed_info, &out_size);
+ if (err || !out_size || speed_info.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to get speed, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, speed_info.status, out_size);
+ return -EINVAL;
+ }
+
+ *speed = speed_info.speed;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_speed);
+
+/* Read the physical link state into *@link_state (1 = up, 0 = down).
+ * Functions configured as force-link-up always report 1 without querying
+ * firmware. Returns 0 on success, negative errno on failure.
+ */
+int hinic_get_link_state(void *hwdev, u8 *link_state)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_get_link get_link = {0};
+ u16 out_size = sizeof(get_link);
+ int err;
+
+ if (!hwdev || !link_state)
+ return -EINVAL;
+
+ if (FUNC_FORCE_LINK_UP(hwdev)) {
+ *link_state = 1;
+ return 0;
+ }
+
+ err = hinic_global_func_id_get(hwdev, &get_link.func_id);
+ if (err)
+ return err;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
+ &get_link, sizeof(get_link),
+ &get_link, &out_size);
+ if (err || !out_size || get_link.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, get_link.status, out_size);
+ return -EINVAL;
+ }
+
+ *link_state = get_link.link_status;
+
+ return 0;
+}
+
+/* Low-level helper: push pause (flow-control) settings to firmware without
+ * touching the cached nic_cfg state. Callers hold cfg_lock and update the
+ * cache themselves. Returns 0 on success, negative errno on failure.
+ */
+static int hinic_set_hw_pause_info(void *hwdev,
+ struct nic_pause_config nic_pause)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_pause_config pause_info = {0};
+ u16 out_size = sizeof(pause_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &pause_info.func_id);
+ if (err)
+ return err;
+
+ pause_info.auto_neg = nic_pause.auto_neg;
+ pause_info.rx_pause = nic_pause.rx_pause;
+ pause_info.tx_pause = nic_pause.tx_pause;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
+ &pause_info, sizeof(pause_info),
+ &pause_info, &out_size);
+ if (err || !out_size || pause_info.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set global pause (flow control) and cache the result in nic_cfg.
+ * Rejected with -EPERM while PFC is enabled (pause and PFC are mutually
+ * exclusive). Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_cfg *nic_cfg;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_cfg = &nic_hwdev->nic_io->nic_cfg;
+ /* NOTE(review): pfc_en is read before cfg_lock is taken; a concurrent
+ * hinic_dcb_set_pfc() could slip in between — confirm callers
+ * serialize these paths.
+ */
+ if (nic_cfg->pfc_en) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set pause, please disable pfc first\n");
+ return -EPERM;
+ }
+
+ down(&nic_cfg->cfg_lock);
+
+ err = hinic_set_hw_pause_info(hwdev, nic_pause);
+ if (err) {
+ up(&nic_cfg->cfg_lock);
+ return err;
+ }
+
+ nic_cfg->pfc_en = 0;
+ nic_cfg->pfc_bitmap = 0;
+ nic_cfg->pause_set = true;
+ nic_cfg->nic_pause.auto_neg = nic_pause.auto_neg;
+ nic_cfg->nic_pause.rx_pause = nic_pause.rx_pause;
+ nic_cfg->nic_pause.tx_pause = nic_pause.tx_pause;
+
+ up(&nic_cfg->cfg_lock);
+
+ return 0;
+}
+
+/* Query the pause (flow-control) configuration currently programmed in
+ * firmware into *@nic_pause. Returns 0 on success, negative errno on
+ * failure.
+ */
+int hinic_get_hw_pause_info(void *hwdev, struct nic_pause_config *nic_pause)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_pause_config pause_info = {0};
+ u16 out_size = sizeof(pause_info);
+ int err;
+
+ if (!hwdev || !nic_pause)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &pause_info.func_id);
+ if (err)
+ return err;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO,
+ &pause_info, sizeof(pause_info),
+ &pause_info, &out_size);
+ if (err || !out_size || pause_info.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info.status, out_size);
+ return -EINVAL;
+ }
+
+ nic_pause->auto_neg = pause_info.auto_neg;
+ nic_pause->rx_pause = pause_info.rx_pause;
+ nic_pause->tx_pause = pause_info.tx_pause;
+
+ return 0;
+}
+
+/* Report pause configuration, preferring the driver's cached values when
+ * the user has explicitly configured pause or autoneg is off.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_cfg *nic_cfg = NULL;
+ int err = 0;
+
+ /* Validate before touching nic_hwdev->nic_io: the original
+ * dereferenced it in the initializer ahead of any NULL check,
+ * unlike every sibling accessor in this file.
+ */
+ if (!hwdev || !nic_pause)
+ return -EINVAL;
+
+ nic_cfg = &nic_hwdev->nic_io->nic_cfg;
+
+ err = hinic_get_hw_pause_info(hwdev, nic_pause);
+ if (err)
+ return err;
+
+ if (nic_cfg->pause_set || !nic_pause->auto_neg) {
+ nic_pause->rx_pause = nic_cfg->nic_pause.rx_pause;
+ nic_pause->tx_pause = nic_cfg->nic_pause.tx_pause;
+ }
+
+ return 0;
+}
+
+/* Program the RX mode bitmap (unicast/multicast/broadcast/promisc flags)
+ * for this function. Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_rx_mode(void *hwdev, u32 enable)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_rx_mode_config rx_mode_cfg = {0};
+ u16 out_size = sizeof(rx_mode_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &rx_mode_cfg.func_id);
+ if (err)
+ return err;
+
+ rx_mode_cfg.rx_mode = enable;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_MODE,
+ &rx_mode_cfg, sizeof(rx_mode_cfg),
+ &rx_mode_cfg, &out_size);
+ if (err || !out_size || rx_mode_cfg.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rx_mode_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* offload feature */
+/* Enable/disable RX VLAN tag stripping offload for this function.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_rx_vlan_offload(void *hwdev, u8 en)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vlan_offload vlan_cfg = {0};
+ u16 out_size = sizeof(vlan_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &vlan_cfg.func_id);
+ if (err)
+ return err;
+
+ vlan_cfg.vlan_rx_offload = en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD,
+ &vlan_cfg, sizeof(vlan_cfg),
+ &vlan_cfg, &out_size);
+ if (err || !out_size || vlan_cfg.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Enable/disable RX checksum offload for this function.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_rx_csum_offload(void *hwdev, u32 en)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_checksum_offload rx_csum_cfg = {0};
+ u16 out_size = sizeof(rx_csum_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &rx_csum_cfg.func_id);
+ if (err)
+ return err;
+
+ rx_csum_cfg.rx_csum_offload = en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,
+ &rx_csum_cfg, sizeof(rx_csum_cfg),
+ &rx_csum_cfg, &out_size);
+ if (err || !out_size || rx_csum_cfg.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rx_csum_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Enable/disable TX TCP segmentation offload for this function.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_tx_tso(void *hwdev, u8 tso_en)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_tso_config tso_cfg = {0};
+ u16 out_size = sizeof(tso_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &tso_cfg.func_id);
+ if (err)
+ return err;
+
+ tso_cfg.tso_en = tso_en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_TSO,
+ &tso_cfg, sizeof(tso_cfg),
+ &tso_cfg, &out_size);
+ if (err || !out_size || tso_cfg.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set tso, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, tso_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* High-level LRO control: enable/disable LRO for both IPv4 and IPv6 with
+ * the given max WQE budget, then (PF only) program the LRO aging timer.
+ * VFs skip the timer step. Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 wqe_num)
+{
+ struct hinic_hwdev *nic_hwdev = hwdev;
+ u8 ipv4_en = 0, ipv6_en = 0;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ ipv4_en = lro_en ? 1 : 0;
+ ipv6_en = lro_en ? 1 : 0;
+
+ nic_info(nic_hwdev->dev_hdl, "Set LRO max wqe number to %u\n", wqe_num);
+
+ /* NOTE(review): wqe_num is truncated to u8 here — callers are assumed
+ * to pass values <= 255; confirm at call sites.
+ */
+ err = hinic_set_rx_lro(hwdev, ipv4_en, ipv6_en, (u8)wqe_num);
+ if (err)
+ return err;
+
+ /* we don't set LRO timer for VF */
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ nic_info(nic_hwdev->dev_hdl, "Set LRO timer to %u\n", lro_timer);
+
+ return hinic_set_rx_lro_timer(hwdev, lro_timer);
+}
+
+/* Program the LRO aggregation timer (in firmware units of @timer_value).
+ * Firmware returning status 0xFF means the command is not implemented by
+ * this FW version; that case is downgraded to success with a 1ms default.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int hinic_set_rx_lro_timer(void *hwdev, u32 timer_value)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_lro_timer lro_timer = {0};
+ u16 out_size = sizeof(lro_timer);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ lro_timer.status = 0;
+ lro_timer.type = 0;
+ lro_timer.enable = 1;
+ lro_timer.timer = timer_value;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO_TIMER,
+ &lro_timer, sizeof(lro_timer),
+ &lro_timer, &out_size);
+ if (lro_timer.status == 0xFF) {
+ /* For this case, we think status (0xFF) is OK */
+ lro_timer.status = 0;
+ nic_err(nic_hwdev->dev_hdl, "Set lro timer not supported by the current FW version, it will be 1ms default\n");
+ }
+
+ if (err || !out_size || lro_timer.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_timer.status, out_size);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Program per-protocol LRO enables and the per-session WQE budget.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_lro_config lro_cfg = {0};
+ u16 out_size = sizeof(lro_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &lro_cfg.func_id);
+ if (err)
+ return err;
+
+ lro_cfg.lro_ipv4_en = ipv4_en;
+ lro_cfg.lro_ipv6_en = ipv6_en;
+ lro_cfg.lro_max_wqe_num = max_wqe_num;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO,
+ &lro_cfg, sizeof(lro_cfg),
+ &lro_cfg, &out_size);
+ if (err || !out_size || lro_cfg.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Low-level helper: push PFC enable + per-priority bitmap to firmware.
+ * Caller (hinic_dcb_set_pfc) holds cfg_lock and maintains the cached
+ * state. Returns 0 on success, negative errno on failure.
+ */
+static int hinic_dcb_set_hw_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_set_pfc pfc = {0};
+ u16 out_size = sizeof(pfc);
+ int err;
+
+ err = hinic_global_func_id_get(hwdev, &pfc.func_id);
+ if (err)
+ return err;
+
+ pfc.pfc_bitmap = pfc_bitmap;
+ pfc.pfc_en = pfc_en;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PFC,
+ &pfc, sizeof(pfc), &pfc, &out_size);
+ if (err || pfc.status || !out_size) {
+ nic_err(dev->dev_hdl, "Failed to set pfc, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pfc.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* dcbtool */
+/* Enable/disable PFC with @pfc_bitmap and cache the result; pause is
+ * implicitly set to the opposite of PFC since the two are mutually
+ * exclusive. NOTE(review): no !hwdev guard before dereferencing
+ * dev->nic_io — presumably callers guarantee a valid hwdev; confirm.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_nic_cfg *nic_cfg = &dev->nic_io->nic_cfg;
+ int err;
+
+ down(&nic_cfg->cfg_lock);
+
+ err = hinic_dcb_set_hw_pfc(hwdev, pfc_en, pfc_bitmap);
+ if (err) {
+ up(&nic_cfg->cfg_lock);
+ return err;
+ }
+
+ nic_cfg->pfc_en = pfc_en;
+ nic_cfg->pfc_bitmap = pfc_bitmap;
+
+ /* pause settings is opposite from pfc */
+ nic_cfg->nic_pause.rx_pause = pfc_en ? 0 : 1;
+ nic_cfg->nic_pause.tx_pause = pfc_en ? 0 : 1;
+
+ up(&nic_cfg->cfg_lock);
+
+ return 0;
+}
+
+/* Stub: PFC readback is not implemented; always reports success without
+ * writing *pfc_en_bitmap.
+ */
+int hinic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap)
+{
+ return 0;
+}
+
+/* Program ETS (Enhanced Transmission Selection): UP-to-TC mapping, PG
+ * bandwidth shares (must sum to 100), UP bandwidth (sum must be a multiple
+ * of 100), PG ids and strict-priority flags.
+ * NOTE(review): the bound check uses '>' so up_tc == HINIC_DCB_TC_MAX is
+ * accepted — confirm whether '>=' was intended.
+ * NOTE(review): up_tc is copied with HINIC_DCB_TC_MAX while the other
+ * arrays use HINIC_DCB_UP_MAX — confirm this matches struct
+ * hinic_up_ets_cfg's field sizes.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw,
+ u8 *prio)
+{
+ struct hinic_up_ets_cfg ets = {0};
+ u16 out_size = sizeof(ets);
+ u16 up_bw_t = 0;
+ u8 pg_bw_t = 0;
+ int i, err;
+
+ for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+ up_bw_t += *(up_bw + i);
+ pg_bw_t += *(pg_bw + i);
+
+ if (*(up_tc + i) > HINIC_DCB_TC_MAX) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Invalid up %d mapping tc: %d\n",
+ i, *(up_tc + i));
+ return -EINVAL;
+ }
+ }
+
+ if (pg_bw_t != 100 || (up_bw_t % 100) != 0) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Invalid pg_bw: %d or up_bw: %d\n", pg_bw_t, up_bw_t);
+ return -EINVAL;
+ }
+
+ ets.port_id = 0; /* reserved */
+ memcpy(ets.up_tc, up_tc, HINIC_DCB_TC_MAX);
+ memcpy(ets.pg_bw, pg_bw, HINIC_DCB_UP_MAX);
+ memcpy(ets.pgid, pgid, HINIC_DCB_UP_MAX);
+ memcpy(ets.up_bw, up_bw, HINIC_DCB_UP_MAX);
+ memcpy(ets.prio, prio, HINIC_DCB_UP_MAX);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ETS,
+ &ets, sizeof(ets), &ets, &out_size);
+ if (err || ets.status || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set ets, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, ets.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Stub: ETS readback is not implemented; always reports success without
+ * writing the output arrays.
+ */
+int hinic_dcb_get_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw,
+ u8 *prio)
+{
+ return 0;
+}
+
+/* Program the CoS-to-UP (user priority) mapping table for this physical
+ * port; @cos_valid_bitmap marks which CoS entries in @cos_up are valid.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up)
+{
+ struct hinic_cos_up_map map = {0};
+ u16 out_size = sizeof(map);
+ int err;
+
+ if (!hwdev || !cos_up)
+ return -EINVAL;
+
+ map.port_id = hinic_physical_port_id(hwdev);
+ map.cos_valid_mask = cos_valid_bitmap;
+ memcpy(map.map, cos_up, sizeof(map.map));
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_COS_UP_MAP,
+ &map, sizeof(map), &map, &out_size);
+ if (err || map.status || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set cos2up map, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, map.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Configure the RQ-to-inline-queue mapping used by DCB: publishes the
+ * active queue count, then sends the RQ count and log2 RQ depth to
+ * firmware.
+ * NOTE(review): the @map parameter is never used here — presumably the
+ * per-RQ mapping table is consumed elsewhere or was dropped; confirm.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map)
+{
+ struct hinic_hwdev *dev;
+ struct hinic_nic_io *nic_io;
+ struct hinic_set_rq_iq_mapping rq_iq_mapping = {0};
+ u16 out_size = sizeof(rq_iq_mapping);
+ int err;
+
+ if (!hwdev || num_rqs > HINIC_MAX_NUM_RQ)
+ return -EINVAL;
+
+ dev = hwdev;
+ nic_io = dev->nic_io;
+
+ hinic_qps_num_set(dev, nic_io->num_qps);
+
+ err = hinic_global_func_id_get(hwdev, &rq_iq_mapping.func_id);
+ if (err)
+ return err;
+
+ rq_iq_mapping.num_rqs = num_rqs;
+ rq_iq_mapping.rq_depth = (u16)ilog2(nic_io->rq_depth);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP,
+ &rq_iq_mapping, sizeof(rq_iq_mapping),
+ &rq_iq_mapping, &out_size);
+ if (err || !out_size || rq_iq_mapping.status) {
+ nic_err(dev->dev_hdl, "Failed to set rq cos mapping, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rq_iq_mapping.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_dcb_set_rq_iq_mapping);
+
+/* nictool */
+/* Stub (nictool): LRO aging timer configuration is not implemented;
+ * always reports success.
+ */
+int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period)
+{
+ return 0;
+}
+
+/* Query the current LRO configuration (per-protocol enables, WQE budget)
+ * from firmware into @cfg. Returns 0 on success, negative errno on
+ * failure.
+ */
+int hinic_get_rx_lro(void *hwdev, struct nic_lro_info *cfg)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_lro_config lro_cfg = {0};
+ u16 out_size = sizeof(lro_cfg);
+ int err;
+
+ if (!hwdev || !cfg)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &lro_cfg.func_id);
+ if (err)
+ return err;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LRO,
+ &lro_cfg, sizeof(lro_cfg),
+ &lro_cfg, &out_size);
+ if (err || !out_size || lro_cfg.status) {
+ /* was "Failed to set lro offload" — copy-paste from the set
+ * path; this is the GET command
+ */
+ nic_err(nic_hwdev->dev_hdl, "Failed to get lro info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ cfg->func_id = lro_cfg.func_id;
+ cfg->lro_ipv4_en = lro_cfg.lro_ipv4_en;
+ cfg->lro_ipv6_en = lro_cfg.lro_ipv6_en;
+ cfg->lro_max_wqe_num = lro_cfg.lro_max_wqe_num;
+ return 0;
+}
+
+/* Stub: jumbo frame size readback is not implemented; always reports
+ * success without writing *jumbo_size.
+ */
+int hinic_get_jumbo_frame_size(void *hwdev, u32 *jumbo_size)
+{
+ return 0;
+}
+
+/* Stub: jumbo frame size configuration is not implemented; always reports
+ * success.
+ */
+int hinic_set_jumbo_frame_size(void *hwdev, u32 jumbo_size)
+{
+ return 0;
+}
+
+/* Enable/disable a specific loopback mode (LOOP_MODE_MIN..LOOP_MODE_MAX).
+ * NOTE(review): no !hwdev guard — hwdev is dereferenced in the error
+ * paths; presumably callers guarantee a valid handle. Confirm.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_loopback_mode_ex(void *hwdev, u32 mode, u32 enable)
+{
+ struct hinic_port_loopback lb = {0};
+ u16 out_size = sizeof(lb);
+ int err;
+
+ lb.mode = mode;
+ lb.en = enable;
+
+ if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Invalid loopback mode %d to set\n", mode);
+ return -EINVAL;
+ }
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LOOPBACK_MODE,
+ &lb, sizeof(lb), &lb, &out_size);
+ if (err || !out_size || lb.status) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set loopback mode %d en %d, err: %d, status: 0x%x, out size: 0x%x\n",
+ mode, enable, err, lb.status, out_size);
+ return -EINVAL;
+ }
+
+ nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Set loopback mode %d en %d succeed\n", mode, enable);
+
+ return 0;
+}
+
+/* Read the currently configured loopback mode and its enable state.
+ * NOTE(review): no NULL guards on hwdev/mode/enable — presumably callers
+ * guarantee valid pointers. Confirm.
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_get_loopback_mode_ex(void *hwdev, u32 *mode, u32 *enable)
+{
+ struct hinic_port_loopback lb = {0};
+ u16 out_size = sizeof(lb);
+ int err;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LOOPBACK_MODE,
+ &lb, sizeof(lb), &lb, &out_size);
+ if (err || !out_size || lb.status) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get loopback mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lb.status, out_size);
+ return -EINVAL;
+ }
+
+ *mode = lb.mode;
+ *enable = lb.en;
+ return 0;
+}
+
+/* Convenience wrapper: toggle the internal loopback mode. */
+int hinic_set_loopback_mode(void *hwdev, bool enable)
+{
+ return hinic_set_loopback_mode_ex(hwdev, HINIC_INTERNAL_LP_MODE,
+ enable);
+}
+
+/* Stub: port enable state readback is not implemented; always reports
+ * success without writing *enable.
+ */
+int hinic_get_port_enable_state(void *hwdev, bool *enable)
+{
+ return 0;
+}
+
+/* Stub: vport enable state readback is not implemented; always reports
+ * success without writing *enable.
+ */
+int hinic_get_vport_enable_state(void *hwdev, bool *enable)
+{
+ return 0;
+}
+
+/* Stub: low-latency interrupt state configuration is not implemented;
+ * always reports success.
+ */
+int hinic_set_lli_state(void *hwdev, u8 lli_state)
+{
+ return 0;
+}
+
+/* Enable/disable the virtual port (function-level packet path). Uses a
+ * longer command timeout on VFs (mailbox) than on PFs (management channel).
+ * Returns 0 on success, negative errno on failure.
+ */
+int hinic_set_vport_enable(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_vport_state en_state = {0};
+ u16 out_size = sizeof(en_state);
+ int err;
+ u32 timeout;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &en_state.func_id);
+ if (err)
+ return err;
+
+ en_state.state = enable ? 1 : 0;
+
+ if (HINIC_IS_VF(nic_hwdev))
+ timeout = SET_VPORT_MBOX_TIMEOUT;
+ else
+ timeout = SET_VPORT_MGMT_TIMEOUT;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_SET_VPORT_ENABLE,
+ &en_state, sizeof(en_state), &en_state,
+ &out_size, timeout);
+
+ if (err || !out_size || en_state.status) {
+ nic_err(nic_hwdev->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, en_state.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_vport_enable);
+
+#define NIC_PORT_DISABLE		0x0
+#define NIC_PORT_ENABLE			0x3
+/* Enable or disable the physical port. Only the PF may change port state;
+ * a VF silently succeeds without touching hardware.
+ * Return: 0 on success (or on VF), -EINVAL on invalid handle/mgmt failure.
+ */
+int hinic_set_port_enable(void *hwdev, bool enable)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_port_state en_state = {0};
+	u16 out_size = sizeof(en_state);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	/* port-level control is a PF privilege */
+	if (HINIC_IS_VF(nic_hwdev))
+		return 0;
+
+	err = hinic_global_func_id_get(hwdev, &en_state.func_id);
+	if (err)
+		return err;
+
+	en_state.version = HINIC_CMD_VER_FUNC_ID;
+	en_state.state = enable ? NIC_PORT_ENABLE : NIC_PORT_DISABLE;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_ENABLE,
+				     &en_state, sizeof(en_state), &en_state,
+				     &out_size);
+	if (err || !out_size || en_state.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, en_state.status, out_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_set_port_enable);
+
+/* rss */
+/* Program the RSS hash-type context for template @tmpl_idx through the
+ * command queue. The context word is built from the per-protocol enable
+ * bits in @rss_type and written big-endian, as microcode expects.
+ * Return: 0 on success, -ENOMEM/-EFAULT/-EINVAL on failure.
+ */
+int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct nic_rss_context_tbl *ctx_tbl;
+	struct hinic_cmd_buf *cmd_buf;
+	u32 ctx = 0;
+	u64 out_param;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	cmd_buf = hinic_alloc_cmd_buf(hwdev);
+	if (!cmd_buf) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to allocate cmd buf\n");
+		return -ENOMEM;
+	}
+
+	/* VALID bit tells microcode the context word carries real data */
+	ctx |= HINIC_RSS_TYPE_SET(1, VALID) |
+	       HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
+	       HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
+	       HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
+	       HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
+	       HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
+	       HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
+	       HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
+	       HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
+
+	cmd_buf->size = sizeof(struct nic_rss_context_tbl);
+
+	/* all fields are converted to big-endian for the microcode */
+	ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf;
+	ctx_tbl->group_index = cpu_to_be32(tmpl_idx);
+	ctx_tbl->offset = 0;
+	ctx_tbl->size = sizeof(u32);
+	ctx_tbl->size = cpu_to_be32(ctx_tbl->size);
+	ctx_tbl->rsvd = 0;
+	ctx_tbl->ctx = cpu_to_be32(ctx);
+
+	/* cfg the rss context table by command queue */
+	err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+				     HINIC_MOD_L2NIC,
+				     HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+				     cmd_buf, &out_param, 0);
+
+	hinic_free_cmd_buf(hwdev, cmd_buf);
+
+	if (err || out_param != 0) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set rss context table, err: %d\n",
+			err);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Read back the RSS hash-type context of template @tmpl_idx and unpack the
+ * per-protocol enable bits into @rss_type.
+ * Return: 0 on success, -EINVAL on invalid args or mgmt failure.
+ */
+int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type *rss_type)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_context_table ctx_tbl = {0};
+	u16 out_size = sizeof(ctx_tbl);
+	int err;
+
+	if (!hwdev || !rss_type)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &ctx_tbl.func_id);
+	if (err)
+		return err;
+
+	ctx_tbl.template_id = (u8)tmpl_idx;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL,
+				     &ctx_tbl, sizeof(ctx_tbl),
+				     &ctx_tbl, &out_size);
+	if (err || !out_size || ctx_tbl.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, ctx_tbl.status, out_size);
+		return -EINVAL;
+	}
+
+	/* unpack the context word returned by management FW */
+	rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4);
+	rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6);
+	rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT);
+	rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4);
+	rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6);
+	rss_type->tcp_ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context,
+						    TCP_IPV6_EXT);
+	rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4);
+	rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6);
+
+	return 0;
+}
+
+/* Set the RSS hash key (HINIC_RSS_KEY_SIZE bytes from @temp) for template
+ * @tmpl_idx via the management channel.
+ * Return: 0 on success, -EINVAL on invalid args or mgmt failure.
+ */
+int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, const u8 *temp)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_template_key temp_key = {0};
+	u16 out_size = sizeof(temp_key);
+	int err;
+
+	if (!hwdev || !temp)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &temp_key.func_id);
+	if (err)
+		return err;
+
+	temp_key.template_id = (u8)tmpl_idx;
+	memcpy(temp_key.key, temp, HINIC_RSS_KEY_SIZE);
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL,
+				     &temp_key, sizeof(temp_key),
+				     &temp_key, &out_size);
+	if (err || !out_size || temp_key.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, temp_key.status, out_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Read the RSS hash key of template @tmpl_idx into @temp
+ * (HINIC_RSS_KEY_SIZE bytes).
+ * Return: 0 on success, -EINVAL on invalid args or mgmt failure.
+ */
+int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_template_key temp_key = {0};
+	u16 out_size = sizeof(temp_key);
+	int err;
+
+	if (!hwdev || !temp)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &temp_key.func_id);
+	if (err)
+		return err;
+
+	temp_key.template_id = (u8)tmpl_idx;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,
+				     &temp_key, sizeof(temp_key),
+				     &temp_key, &out_size);
+	if (err || !out_size || temp_key.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get hash key, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, temp_key.status, out_size);
+		return -EINVAL;
+	}
+
+	memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE);
+
+	return 0;
+}
+
+/* Query which hash engine (e.g. Toeplitz/XOR per FW definition) template
+ * @tmpl_idx currently uses; result stored in *type.
+ * Return: 0 on success, -EINVAL on invalid args or mgmt failure.
+ */
+int hinic_rss_get_hash_engine(void *hwdev, u8 tmpl_idx, u8 *type)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_engine_type hash_type = {0};
+	u16 out_size = sizeof(hash_type);
+	int err;
+
+	if (!hwdev || !type)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &hash_type.func_id);
+	if (err)
+		return err;
+
+	hash_type.template_id = tmpl_idx;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_HASH_ENGINE,
+				     &hash_type, sizeof(hash_type),
+				     &hash_type, &out_size);
+	if (err || !out_size || hash_type.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, hash_type.status, out_size);
+		return -EINVAL;
+	}
+
+	*type = hash_type.hash_engine;
+	return 0;
+}
+
+/* Select the hash engine @type for RSS template @tmpl_idx.
+ * Return: 0 on success, -EINVAL on invalid handle or mgmt failure.
+ */
+int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_engine_type hash_type = {0};
+	u16 out_size = sizeof(hash_type);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &hash_type.func_id);
+	if (err)
+		return err;
+
+	hash_type.hash_engine = type;
+	hash_type.template_id = tmpl_idx;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,
+				     &hash_type, sizeof(hash_type),
+				     &hash_type, &out_size);
+	if (err || !out_size || hash_type.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set hash engine, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, hash_type.status, out_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Program the HINIC_RSS_INDIR_SIZE-entry RSS indirection table for template
+ * @tmpl_idx via the command queue. The table is sent in two halves of
+ * HINIC_RSS_INDIR_SIZE/2 entries each (the cmdq message carries at most
+ * half the table), with each 4-byte group byte-swapped to big-endian.
+ * Return: 0 on success, -ENOMEM/-EFAULT/-EINVAL on failure.
+ */
+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, const u32 *indir_table)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct nic_rss_indirect_tbl *indir_tbl;
+	struct hinic_cmd_buf *cmd_buf;
+	u32 i;
+	u32 *temp;
+	u32 indir_size;
+	u64 out_param;
+	int err;
+
+	if (!hwdev || !indir_table)
+		return -EINVAL;
+
+	cmd_buf = hinic_alloc_cmd_buf(hwdev);
+	if (!cmd_buf) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to allocate cmd buf\n");
+		return -ENOMEM;
+	}
+
+	cmd_buf->size = sizeof(struct nic_rss_indirect_tbl);
+
+	indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf;
+	indir_tbl->group_index = cpu_to_be32(tmpl_idx);
+
+	/* pack each queue index into a byte, then swap every completed
+	 * 4-byte group to big-endian for the microcode
+	 */
+	for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
+		indir_tbl->entry[i] = (u8)(*(indir_table + i));
+
+		if (0x3 == (i & 0x3)) {
+			temp = (u32 *)&indir_tbl->entry[i - 3];
+			*temp = cpu_to_be32(*temp);
+		}
+	}
+
+	/* cfg the rss indirect table by command queue */
+	indir_size = HINIC_RSS_INDIR_SIZE / 2;
+	indir_tbl->offset = 0;
+	indir_tbl->size = cpu_to_be32(indir_size);
+
+	/* first half: offset 0 */
+	err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+				     HINIC_MOD_L2NIC,
+				     HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+				     cmd_buf, &out_param, 0);
+	if (err || out_param != 0) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set rss indir table\n");
+		err = -EFAULT;
+		goto free_buf;
+	}
+
+	/* second half: shift upper entries down and resend at offset
+	 * indir_size (entries are u8, so indir_size doubles as byte count)
+	 */
+	indir_tbl->offset = cpu_to_be32(indir_size);
+	indir_tbl->size = cpu_to_be32(indir_size);
+	memcpy(&indir_tbl->entry[0], &indir_tbl->entry[indir_size], indir_size);
+
+	err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+				     HINIC_MOD_L2NIC,
+				     HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+				     cmd_buf, &out_param, 0);
+	if (err || out_param != 0) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set rss indir table\n");
+		err = -EFAULT;
+	}
+
+free_buf:
+	hinic_free_cmd_buf(hwdev, cmd_buf);
+
+	return err;
+}
+
+/* Read back the RSS indirection table of template @tmpl_idx into
+ * @indir_table (HINIC_RSS_INDIR_SIZE entries). The reply arrives
+ * big-endian and is converted in place before the copy-out.
+ * Return: 0 on success, -EINVAL on invalid args or mgmt failure.
+ */
+int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_indir_table rss_cfg = {0};
+	u16 out_size = sizeof(rss_cfg);
+	int err = 0, i;
+
+	/* guard before any dereference, consistent with sibling helpers */
+	if (!hwdev || !indir_table)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &rss_cfg.func_id);
+	if (err)
+		return err;
+
+	rss_cfg.template_id = (u8)tmpl_idx;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev,
+				     HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
+				     &rss_cfg, sizeof(rss_cfg), &rss_cfg,
+				     &out_size);
+	if (err || !out_size || rss_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, rss_cfg.status, out_size);
+		return -EINVAL;
+	}
+
+	hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE);
+	for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+		indir_table[i] = rss_cfg.indir[i];
+
+	return 0;
+}
+
+/* Apply the global RSS configuration: enable flag, template id, number of
+ * traffic classes (encoded as log2 for microcode) and the priority-to-TC
+ * mapping (HINIC_DCB_UP_MAX entries from @prio_tc).
+ * Return: 0 on success, -EINVAL on invalid args or mgmt failure.
+ */
+int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_config rss_cfg = {0};
+	u16 out_size = sizeof(rss_cfg);
+	int err;
+
+	/* micro code required: number of TC should be power of 2 */
+	if (!hwdev || !prio_tc || (tc_num & (tc_num - 1)))
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &rss_cfg.func_id);
+	if (err)
+		return err;
+
+	rss_cfg.rss_en = rss_en;
+	rss_cfg.template_id = tmpl_idx;
+	/* microcode expects log2(tc_num); 0 TCs maps to priority number 0 */
+	rss_cfg.rq_priority_number = tc_num ? (u8)ilog2(tc_num) : 0;
+
+	memcpy(rss_cfg.prio_tc, prio_tc, HINIC_DCB_UP_MAX);
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_CFG,
+				     &rss_cfg, sizeof(rss_cfg),
+				     &rss_cfg, &out_size);
+	if (err || !out_size || rss_cfg.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, rss_cfg.status, out_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Fetch this function's vport statistics from management FW into @stats.
+ * Return: 0 on success, -EINVAL on invalid args, -EFAULT on mgmt failure.
+ */
+int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats)
+{
+	struct hinic_port_stats_info stats_info = {0};
+	struct hinic_cmd_vport_stats vport_stats = {0};
+	u16 out_size = sizeof(vport_stats);
+	int err;
+
+	/* hwdev is dereferenced in the error path below; guard it (and the
+	 * output buffer) up front, consistent with sibling helpers
+	 */
+	if (!hwdev || !stats)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &stats_info.func_id);
+	if (err)
+		return err;
+
+	stats_info.stats_version = HINIC_PORT_STATS_VERSION;
+	stats_info.stats_size = sizeof(vport_stats);
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT,
+				     &stats_info, sizeof(stats_info),
+				     &vport_stats, &out_size);
+	if (err || !out_size || vport_stats.status) {
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, vport_stats.status, out_size);
+		return -EFAULT;
+	}
+
+	memcpy(stats, &vport_stats.stats, sizeof(*stats));
+
+	return 0;
+}
+
+/* Fetch physical-port statistics into @stats. The reply struct is large,
+ * so it is heap-allocated rather than placed on the kernel stack.
+ * Return: 0 on success, -ENOMEM/-EINVAL on failure.
+ * NOTE(review): hwdev/stats are not NULL-checked and stats_info.func_id is
+ * left zero - presumably port stats are function-agnostic; confirm.
+ */
+int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats)
+{
+	struct hinic_port_stats *port_stats;
+	struct hinic_port_stats_info stats_info = {0};
+	u16 out_size = sizeof(*port_stats);
+	int err;
+
+	port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
+	if (!port_stats)
+		return -ENOMEM;
+
+	stats_info.stats_version = HINIC_PORT_STATS_VERSION;
+	stats_info.stats_size = sizeof(*port_stats);
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS,
+				     &stats_info, sizeof(stats_info),
+				     port_stats, &out_size);
+	if (err || !out_size || port_stats->status) {
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, port_stats->status, out_size);
+		err = -EINVAL;
+		goto out;
+	}
+
+	memcpy(stats, &port_stats->stats, sizeof(*stats));
+
+out:
+	kfree(port_stats);
+
+	return err;
+}
+
+/* Copy the management FW version string into @mgmt_ver (buffer of at least
+ * HINIC_MGMT_VERSION_MAX_LEN bytes, always NUL-terminated by snprintf).
+ * Return: 0 on success, -EINVAL on invalid args, mgmt failure or a version
+ * string that does not fit.
+ */
+int hinic_get_mgmt_version(void *hwdev, u8 *mgmt_ver)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_version_info up_ver = {0};
+	u16 out_size;
+	int err;
+
+	/* dev is dereferenced below; guard both args, as siblings do */
+	if (!hwdev || !mgmt_ver)
+		return -EINVAL;
+
+	out_size = sizeof(up_ver);
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,
+				     &up_ver, sizeof(up_ver), &up_ver,
+				     &out_size);
+	if (err || !out_size || up_ver.status) {
+		nic_err(dev->dev_hdl, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, up_ver.status, out_size);
+		return -EINVAL;
+	}
+
+	/* snprintf returns the would-be length; >= max means truncation */
+	err = snprintf(mgmt_ver, HINIC_MGMT_VERSION_MAX_LEN, "%s", up_ver.ver);
+	if (err <= 0 || err >= HINIC_MGMT_VERSION_MAX_LEN) {
+		nic_err(dev->dev_hdl,
+			"Failed to snprintf fw version, function return(%d) and dest_len(%d)\n",
+			err, HINIC_MGMT_VERSION_MAX_LEN);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Collect the three firmware version strings (management FW, boot, and
+ * microcode) into @fw_ver via three sequential mgmt queries.
+ * Return: 0 on success, -EINVAL on invalid args or any query failure.
+ */
+int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_version_info ver_info = {0};
+	u16 out_size = sizeof(ver_info);
+	int err;
+
+	if (!hwdev || !fw_ver)
+		return -EINVAL;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,
+				     &ver_info, sizeof(ver_info), &ver_info,
+				     &out_size);
+	if (err || !out_size || ver_info.status) {
+		nic_err(dev->dev_hdl, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, ver_info.status, out_size);
+		return -EINVAL;
+	}
+
+	memcpy(fw_ver->mgmt_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+	/* out_size is both input (buffer size) and output; reset per query */
+	out_size = sizeof(ver_info);
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_BOOT_VERSION,
+				     &ver_info, sizeof(ver_info), &ver_info,
+				     &out_size);
+	if (err || !out_size || ver_info.status) {
+		/* fixed garbled message: "boot versionerr" -> "boot version, err" */
+		nic_err(dev->dev_hdl, "Failed to get boot version, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, ver_info.status, out_size);
+		return -EINVAL;
+	}
+
+	memcpy(fw_ver->boot_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+	out_size = sizeof(ver_info);
+	err = l2nic_msg_to_mgmt_sync(hwdev,
+				     HINIC_PORT_CMD_GET_MICROCODE_VERSION,
+				     &ver_info, sizeof(ver_info), &ver_info,
+				     &out_size);
+	if (err || !out_size || ver_info.status) {
+		nic_err(dev->dev_hdl, "Failed to get microcode version, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, ver_info.status, out_size);
+		return -EINVAL;
+	}
+
+	memcpy(fw_ver->microcode_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_get_fw_version);
+
+/* Allocate an RSS template from the FW-managed pool; the assigned id is
+ * returned through @tmpl_idx.
+ * Return: 0 on success, -ENOSPC when the template table is full,
+ * -EINVAL on invalid args or other mgmt failure.
+ */
+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_template_mgmt template_mgmt = {0};
+	u16 out_size = sizeof(template_mgmt);
+	int err;
+
+	if (!hwdev || !tmpl_idx)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &template_mgmt.func_id);
+	if (err)
+		return err;
+
+	template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+				     &template_mgmt, sizeof(template_mgmt),
+				     &template_mgmt, &out_size);
+	if (err || !out_size || template_mgmt.status) {
+		/* a full table is an expected condition, not a hard error */
+		if (template_mgmt.status == HINIC_MGMT_STATUS_ERR_FULL) {
+			nic_warn(nic_hwdev->dev_hdl, "Failed to alloc rss template, table is full\n");
+			return -ENOSPC;
+		}
+		nic_err(nic_hwdev->dev_hdl, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, template_mgmt.status, out_size);
+		return -EINVAL;
+	}
+
+	*tmpl_idx = template_mgmt.template_id;
+
+	return 0;
+}
+
+/* Return RSS template @tmpl_idx to the FW-managed pool.
+ * Return: 0 on success, -EINVAL on invalid handle or mgmt failure.
+ */
+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_template_mgmt template_mgmt = {0};
+	u16 out_size = sizeof(template_mgmt);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &template_mgmt.func_id);
+	if (err)
+		return err;
+
+	template_mgmt.template_id = tmpl_idx;
+	template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+				     &template_mgmt, sizeof(template_mgmt),
+				     &template_mgmt, &out_size);
+	if (err || !out_size || template_mgmt.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, template_mgmt.status, out_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Enable (@enable true) or drop traffic for (@enable false) all functions
+ * on this port; the wire protocol expresses this as a "drop" flag, hence
+ * the inverted encoding below.
+ * Return: 0 on success, -EINVAL on invalid handle or mgmt failure.
+ */
+int hinic_set_port_funcs_state(void *hwdev, bool enable)
+{
+	struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+	struct hinic_port_funcs_state state = {0};
+	u16 out_size = sizeof(state);
+	int err = 0;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &state.func_id);
+	if (err)
+		return err;
+
+	/* drop_en is the inverse of "enabled" */
+	state.drop_en = enable ? 0 : 1;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_FUNCS_STATE,
+				     &state, sizeof(state), &state, &out_size);
+	if (err || !out_size || state.status) {
+		nic_err(dev->dev_hdl, "Failed to %s all functions in port, err: %d, status: 0x%x, out size: 0x%x\n",
+			enable ? "enable" : "disable", err, state.status,
+			out_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Ask management FW to reset this function's port link configuration to
+ * its defaults.
+ * Return: 0 on success, -EFAULT on mgmt failure.
+ * NOTE(review): hwdev is not NULL-checked before dev->dev_hdl is used in
+ * the error path - callers must pass a valid handle.
+ */
+int hinic_reset_port_link_cfg(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_reset_link_cfg reset_cfg = {0};
+	u16 out_size = sizeof(reset_cfg);
+	int err;
+
+	err = hinic_global_func_id_get(hwdev, &reset_cfg.func_id);
+	if (err)
+		return err;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RESET_LINK_CFG,
+				     &reset_cfg, sizeof(reset_cfg),
+				     &reset_cfg, &out_size);
+	if (err || !out_size || reset_cfg.status) {
+		nic_err(dev->dev_hdl, "Failed to reset port link configure, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, reset_cfg.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Cache @mac as the administratively-assigned MAC of VF @vf_id in the PF's
+ * per-VF bookkeeping. Does not touch hardware.
+ * Return: 0 on success, -EINVAL on NULL arguments.
+ */
+int hinic_save_vf_mac(void *hwdev, u16 vf_id, u8 *mac)
+{
+	struct hinic_nic_io *nic_io;
+
+	if (!hwdev || !mac)
+		return -EINVAL;
+
+	nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	memcpy(nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].vf_mac_addr, mac,
+	       ETH_ALEN);
+
+	return 0;
+}
+
+/* PF-side mailbox handler: forward a VF's MTU-change request to management
+ * FW unchanged; @vf_id is used only for the error log.
+ * Return: 0 on success, the forwarding error otherwise.
+ */
+static int hinic_change_vf_mtu_msg_handler(struct hinic_hwdev *hwdev, u16 vf_id,
+					   void *buf_in, u16 in_size,
+					   void *buf_out, u16 *out_size)
+{
+	int err;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+				     HINIC_PORT_CMD_CHANGE_MTU, buf_in, in_size,
+				     buf_out, out_size, 0);
+	if (err) {
+		nic_err(hwdev->dev_hdl, "Failed to set VF %u mtu\n", vf_id);
+		return err;
+	}
+
+	return 0;
+}
+
+/* True if @addr is the all-zero Ethernet address. Delegates to the kernel
+ * helper from <linux/etherdevice.h> (already in scope; see
+ * is_valid_ether_addr() uses below) instead of open-coding the OR-fold.
+ */
+static bool is_ether_addr_zero(const u8 *addr)
+{
+	return is_zero_ether_addr(addr);
+}
+
+/* PF-side mailbox handler for a VF's GET_MAC request.
+ * In multi-host slave modes or when OVS offload is supported the query is
+ * forwarded to management FW, falling back to the PF-cached VF MAC when FW
+ * returns an all-zero address; otherwise the cached MAC is answered
+ * directly without touching FW.
+ * Return: 0 on success, forwarding error otherwise.
+ */
+static int hinic_get_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf,
+					void *buf_in, u16 in_size,
+					void *buf_out, u16 *out_size)
+{
+	struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+	struct hinic_port_mac_set *mac_info = buf_out;
+	int err;
+
+	if (nic_io->hwdev->func_mode == FUNC_MOD_MULTI_BM_SLAVE ||
+	    nic_io->hwdev->func_mode == FUNC_MOD_MULTI_VM_SLAVE ||
+	    (hinic_support_ovs(nic_io->hwdev, NULL))) {
+		err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC,
+						HINIC_PORT_CMD_GET_MAC, buf_in,
+						in_size, buf_out, out_size, 0);
+
+		if (!err) {
+			/* FW has no MAC for this VF: use the PF cache */
+			if (is_ether_addr_zero(&mac_info->mac[0]))
+				memcpy(mac_info->mac,
+				       vf_info->vf_mac_addr, ETH_ALEN);
+		}
+		return err;
+	}
+
+	memcpy(mac_info->mac, vf_info->vf_mac_addr, ETH_ALEN);
+	mac_info->status = 0;
+	*out_size = sizeof(*mac_info);
+
+	return 0;
+}
+
+/* PF-side mailbox handler for a VF's SET_MAC request.
+ * If the PF administratively assigned the VF MAC and the VF is not
+ * trusted, the request is refused with HINIC_PF_SET_VF_ALREADY (reported
+ * as success so the VF sees the status code); otherwise it is forwarded
+ * to management FW. Busy-with-FW-activation errors are passed through.
+ * Return: 0/forwarded result on success, -EFAULT on hard failure.
+ */
+static int hinic_set_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf,
+					void *buf_in, u16 in_size,
+					void *buf_out, u16 *out_size)
+{
+	struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+	struct hinic_port_mac_set *mac_in = buf_in;
+	struct hinic_port_mac_set *mac_out = buf_out;
+	int err;
+
+	if (vf_info->pf_set_mac && !(vf_info->trust) &&
+	    is_valid_ether_addr(mac_in->mac)) {
+		nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF %d MAC address\n",
+			 HW_VF_ID_TO_OS(vf));
+		mac_out->status = HINIC_PF_SET_VF_ALREADY;
+		*out_size = sizeof(*mac_out);
+		return 0;
+	}
+
+	err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC,
+					HINIC_PORT_CMD_SET_MAC, buf_in, in_size,
+					buf_out, out_size, 0);
+	if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+	     err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+		nic_err(nic_io->hwdev->dev_hdl, "Failed to set VF %d MAC address, err: %d, status: 0x%x, out size: 0x%x\n",
+			HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size);
+		return -EFAULT;
+	}
+
+	return err;
+}
+
+/* PF-side mailbox handler for a VF's DEL_MAC request.
+ * An untrusted VF may not delete the MAC the PF assigned to it (refused
+ * with HINIC_PF_SET_VF_ALREADY); anything else is forwarded to FW.
+ * Return: 0/forwarded result on success, -EFAULT on hard failure.
+ */
+static int hinic_del_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf,
+					void *buf_in, u16 in_size,
+					void *buf_out, u16 *out_size)
+{
+	struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+	struct hinic_port_mac_set *mac_in = buf_in;
+	struct hinic_port_mac_set *mac_out = buf_out;
+	int err;
+
+	/* refuse only when the target is exactly the PF-assigned MAC */
+	if (vf_info->pf_set_mac  && !(vf_info->trust) &&
+	    is_valid_ether_addr(mac_in->mac) &&
+	    !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) {
+		nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF mac\n");
+		mac_out->status = HINIC_PF_SET_VF_ALREADY;
+		*out_size = sizeof(*mac_out);
+		return 0;
+	}
+
+	err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC,
+					HINIC_PORT_CMD_DEL_MAC, buf_in, in_size,
+					buf_out, out_size, 0);
+	if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+	     err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+		nic_err(nic_io->hwdev->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+			HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size);
+		return -EFAULT;
+	}
+
+	return err;
+}
+
+/* PF-side mailbox handler for a VF's UPDATE_MAC request.
+ * Rejects an invalid new MAC outright; an untrusted VF whose MAC the PF
+ * assigned is refused with HINIC_PF_SET_VF_ALREADY; otherwise forwarded
+ * to management FW.
+ * Return: 0/forwarded result on success, -EINVAL/-EFAULT on failure.
+ */
+static int hinic_update_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf,
+					   void *buf_in, u16 in_size,
+					   void *buf_out, u16 *out_size)
+{
+	struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+	struct hinic_port_mac_update *mac_in = buf_in;
+	struct hinic_port_mac_update *mac_out = buf_out;
+	int err;
+
+	if (!is_valid_ether_addr(mac_in->new_mac)) {
+		nic_err(nic_io->hwdev->dev_hdl, "Update VF MAC is invalid\n");
+		return -EINVAL;
+	}
+
+	if (vf_info->pf_set_mac && !(vf_info->trust)) {
+		nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF mac\n");
+		mac_out->status = HINIC_PF_SET_VF_ALREADY;
+		*out_size = sizeof(*mac_out);
+		return 0;
+	}
+
+	err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC,
+					HINIC_PORT_CMD_UPDATE_MAC, buf_in,
+					in_size, buf_out, out_size, 0);
+	if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+	     err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+		nic_warn(nic_io->hwdev->dev_hdl, "Failed to update VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+			 HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size);
+		return -EFAULT;
+	}
+
+	return err;
+}
+
+/*lint -save -e734*/
+/* Add (@add true) or clear a VLAN/QoS assignment for VF @vf_id through the
+ * management channel. Requests to remove VLAN 0 are ignored by design.
+ * Return: 0 on success, -EFAULT on mgmt failure.
+ */
+static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid,
+			     u8 qos, int vf_id)
+{
+	struct hinic_vf_vlan_config vf_vlan = {0};
+	u8 cmd;
+	u16 out_size = sizeof(vf_vlan);
+	int err;
+
+	/* VLAN 0 is a special case, don't allow it to be removed */
+	if (!vid && !add)
+		return 0;
+
+	/* FW addresses VFs by global function id: PF/VF offset + index */
+	vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+	vf_vlan.vlan_id = vid;
+	vf_vlan.qos = qos;
+
+	if (add)
+		cmd = HINIC_PORT_CMD_SET_VF_VLAN;
+	else
+		cmd = HINIC_PORT_CMD_CLR_VF_VLAN;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, cmd, &vf_vlan, sizeof(vf_vlan),
+				     &vf_vlan, &out_size);
+	if (err || !out_size || vf_vlan.status) {
+		nic_err(hwdev->dev_hdl, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+			HW_VF_ID_TO_OS(vf_id), err, vf_vlan.status, out_size);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*lint -restore*/
+/* Re-apply the PF-side administrative configuration of VF @vf_id to
+ * hardware: assigned MAC (with VLAN folded into the MAC entry when FW
+ * supports MAC reuse), VLAN/QoS, and tx rate limits. Called when the VF
+ * (re-)registers.
+ * Return: 0 on success, first failing step's error otherwise.
+ */
+static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id)
+{
+	struct vf_data_storage *vf_info;
+	u16 func_id, vlan_id;
+	int err = 0;
+
+	vf_info = hwdev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+	if (vf_info->pf_set_mac) {
+		func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+		if (FW_SUPPORT_MAC_REUSE_FUNC(hwdev)) {
+			vlan_id = vf_info->pf_vlan;
+			if (vlan_id)
+				vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+		} else {
+			vlan_id = 0;
+		}
+
+		err = hinic_set_mac(hwdev, vf_info->vf_mac_addr, vlan_id,
+				    func_id);
+		if (err) {
+			nic_err(hwdev->dev_hdl, "Failed to set VF %d MAC\n",
+				HW_VF_ID_TO_OS(vf_id));
+			return err;
+		}
+	}
+	if (hinic_vf_info_vlanprio(hwdev, vf_id)) {
+		err = hinic_set_vf_vlan(hwdev, true, vf_info->pf_vlan,
+					vf_info->pf_qos, vf_id);
+		if (err) {
+			nic_err(hwdev->dev_hdl, "Failed to add VF %d VLAN_QOS\n",
+				HW_VF_ID_TO_OS(vf_id));
+			return err;
+		}
+	}
+
+	if (vf_info->max_rate) {
+		err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate,
+					   vf_info->min_rate);
+		if (err) {
+			nic_err(hwdev->dev_hdl, "Failed to set VF %d max rate %d, min rate %d\n",
+				HW_VF_ID_TO_OS(vf_id), vf_info->max_rate,
+				vf_info->min_rate);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/* PF-side mailbox handler for VF_REGISTER: validate the VF id, push the
+ * PF-administered configuration to the VF, and mark it registered.
+ * On failure the mailbox reply carries status EFAULT back to the VF.
+ * Return: 0 on success, -EFAULT/config error otherwise.
+ */
+static int hinic_register_vf_msg_handler(void *hwdev, u16 vf_id,
+					 void *buf_out, u16 *out_size)
+{
+	struct hinic_hwdev *hw_dev = hwdev;
+	struct hinic_nic_io *nic_io = hw_dev->nic_io;
+	struct hinic_register_vf *register_info = buf_out;
+	int err;
+
+	/* vf_id is 1-based on the wire; max valid value is max_vfs */
+	if (vf_id > nic_io->max_vfs) {
+		nic_err(hw_dev->dev_hdl, "Register VF id %d exceed limit[0-%d]\n",
+			HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs));
+		register_info->status = EFAULT;
+		return -EFAULT;
+	}
+
+	*out_size = sizeof(*register_info);
+	err = hinic_init_vf_config(hw_dev, vf_id);
+	if (err) {
+		register_info->status = EFAULT;
+		return err;
+	}
+
+	nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true;
+
+	return 0;
+}
+
+/* PF-side mailbox handler for VF_UNREGISTER: clear the registered flag of
+ * VF @vf_id. Out-of-range ids are ignored silently.
+ */
+void hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id)
+{
+	struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+	struct hinic_nic_io *nic_io = hw_dev->nic_io;
+
+	if (vf_id > nic_io->max_vfs)
+		return;
+
+	nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = false;
+}
+
+/* PF-side mailbox handler for GET_LINK_STATE: answer from PF state without
+ * querying FW. A VF with an administratively forced link reports that
+ * forced state; otherwise it mirrors the physical link status.
+ */
+static void hinic_get_vf_link_status_msg_handler(struct hinic_nic_io *nic_io,
+						 u16 vf_id, void *buf_out,
+						 u16 *out_size)
+{
+	struct vf_data_storage *vf_infos = nic_io->vf_infos;
+	struct hinic_get_link *get_link = buf_out;
+	bool link_forced, link_up;
+
+	link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced;
+	link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up;
+
+	if (link_forced)
+		get_link->link_status = link_up ?
+					HINIC_LINK_UP : HINIC_LINK_DOWN;
+	else
+		get_link->link_status = nic_io->link_status;
+
+	get_link->status = 0;
+	*out_size = sizeof(*get_link);
+}
+
+/* PF-side mailbox handler for GET_VF_COS: reply with a copy of the PF's
+ * current DCB state; always succeeds.
+ */
+static void hinic_get_vf_cos_msg_handler(struct hinic_nic_io *nic_io,
+					 u16 vf_id, void *buf_out,
+					 u16 *out_size)
+{
+	struct hinic_vf_dcb_state *dcb_state = buf_out;
+
+	memcpy(&dcb_state->state, &nic_io->dcb_state,
+	       sizeof(nic_io->dcb_state));
+
+	dcb_state->status = 0;
+	*out_size = sizeof(*dcb_state);
+}
+
+/* pf receive message from vf */
+/* Top-level dispatcher for L2NIC mailbox commands arriving from a VF.
+ * The command is first validated against the nic_cmd_support_vf whitelist
+ * (id + expected message length); known commands are routed to dedicated
+ * handlers, everything else is passed through to management FW verbatim.
+ * Return: 0 on success, HINIC_MBOX_VF_CMD_ERROR for invalid commands, or
+ * the handler's error code.
+ */
+int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+			u16 in_size, void *buf_out, u16 *out_size)
+{
+	u8 size = ARRAY_SIZE(nic_cmd_support_vf);
+	struct hinic_nic_io *nic_io;
+	int err = 0;
+	u32 timeout = 0;
+
+	if (!hwdev)
+		return -EFAULT;
+
+	/* reject commands a VF is not allowed to issue, or malformed ones */
+	if (!hinic_mbox_check_cmd_valid(hwdev, nic_cmd_support_vf, vf_id, cmd,
+					buf_in, in_size, size)) {
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"PF Receive VF nic cmd(0x%x) or mbox len(0x%x) is invalid\n",
+			cmd, in_size);
+		err = HINIC_MBOX_VF_CMD_ERROR;
+		return err;
+	}
+
+	nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+	switch (cmd) {
+	case HINIC_PORT_CMD_VF_REGISTER:
+		err = hinic_register_vf_msg_handler(hwdev, vf_id, buf_out,
+						    out_size);
+		break;
+
+	case HINIC_PORT_CMD_VF_UNREGISTER:
+		*out_size = 0;
+		hinic_unregister_vf_msg_handler(hwdev, vf_id);
+		break;
+
+	case HINIC_PORT_CMD_CHANGE_MTU:
+		err = hinic_change_vf_mtu_msg_handler(hwdev, vf_id, buf_in,
+						      in_size, buf_out,
+						      out_size);
+		break;
+
+	case HINIC_PORT_CMD_GET_MAC:
+		hinic_get_vf_mac_msg_handler(nic_io, vf_id, buf_in,
+					     in_size, buf_out, out_size);
+		break;
+
+	case HINIC_PORT_CMD_SET_MAC:
+		err = hinic_set_vf_mac_msg_handler(nic_io, vf_id, buf_in,
+						   in_size, buf_out, out_size);
+		break;
+
+	case HINIC_PORT_CMD_DEL_MAC:
+		err = hinic_del_vf_mac_msg_handler(nic_io, vf_id, buf_in,
+						   in_size, buf_out, out_size);
+		break;
+
+	case HINIC_PORT_CMD_UPDATE_MAC:
+		err = hinic_update_vf_mac_msg_handler(nic_io, vf_id, buf_in,
+						      in_size, buf_out,
+						      out_size);
+		break;
+
+	case HINIC_PORT_CMD_GET_LINK_STATE:
+		hinic_get_vf_link_status_msg_handler(nic_io, vf_id, buf_out,
+						     out_size);
+		break;
+
+	case HINIC_PORT_CMD_GET_VF_COS:
+		hinic_get_vf_cos_msg_handler(nic_io, vf_id, buf_out, out_size);
+		break;
+
+	default:
+		/* pass through */
+		if (cmd == HINIC_PORT_CMD_SET_VPORT_ENABLE)
+			timeout = SET_VPORT_MGMT_TIMEOUT;
+
+		err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC,
+						cmd, buf_in, in_size,
+						buf_out, out_size, timeout);
+
+		break;
+	}
+
+	/* busy-with-FW-activation results are expected and not logged */
+	if (err && err != HINIC_DEV_BUSY_ACTIVE_FW &&
+	    err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+		nic_err(nic_io->hwdev->dev_hdl, "PF receive VF L2NIC cmd: %d process error, err: %d\n",
+			cmd, err);
+	return err;
+}
+
+/* Initialize the PF-side link-state bookkeeping for VF @vf_id from the
+ * set_vf_link_state module parameter (clamped into range), forcing the
+ * link up when OVS offload is supported or the function forces link up.
+ * Return: 0 on success, -EINVAL on an unexpected state value.
+ */
+static int hinic_init_vf_infos(struct hinic_nic_io *nic_io, u16 vf_id)
+{
+	struct vf_data_storage *vf_infos = nic_io->vf_infos;
+	u8 vf_link_state;
+
+	/* sanitize the module parameter once, resetting it globally */
+	if (set_vf_link_state > HINIC_IFLA_VF_LINK_STATE_DISABLE) {
+		nic_warn(nic_io->hwdev->dev_hdl, "Module Parameter set_vf_link_state value %d is out of range, resetting to %d\n",
+			 set_vf_link_state, HINIC_IFLA_VF_LINK_STATE_AUTO);
+		set_vf_link_state = HINIC_IFLA_VF_LINK_STATE_AUTO;
+	}
+
+	vf_link_state = hinic_support_ovs(nic_io->hwdev, NULL) ?
+			HINIC_IFLA_VF_LINK_STATE_ENABLE : set_vf_link_state;
+
+	if (FUNC_FORCE_LINK_UP(nic_io->hwdev))
+		vf_link_state = HINIC_IFLA_VF_LINK_STATE_ENABLE;
+
+	switch (vf_link_state) {
+	case HINIC_IFLA_VF_LINK_STATE_AUTO:
+		vf_infos[vf_id].link_forced = false;
+		break;
+	case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+		vf_infos[vf_id].link_forced = true;
+		vf_infos[vf_id].link_up = true;
+		break;
+	case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+		vf_infos[vf_id].link_forced = true;
+		vf_infos[vf_id].link_up = false;
+		break;
+	default:
+		nic_err(nic_io->hwdev->dev_hdl, "Input parameter set_vf_link_state error: %d\n",
+			vf_link_state);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Configure hardware defaults for VFs in [@start_vf_id, @end_vf_id]:
+ * set each VF's work-queue page size to the fixed default.
+ * Return: 0 on success, first failing VF's error otherwise.
+ */
+int hinic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id)
+{
+	u16 i, func_idx;
+	int err;
+
+	/* vf use 256K as default wq page size, and can't change it */
+	for (i = start_vf_id; i <= end_vf_id; i++) {
+		func_idx = hinic_glb_pf_vf_offset(hwdev) + i;
+		err = hinic_set_wq_page_size(hwdev, func_idx,
+					     HINIC_DEFAULT_WQ_PAGE_SIZE);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/* Undo hinic_init_vf_hw() for VFs in [@start_vf_id, @end_vf_id]: restore
+ * the hardware-default wq page size and clear per-VF state. Best-effort;
+ * always reports success.
+ */
+int hinic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id)
+{
+	u16 func_idx, idx;
+
+	for (idx = start_vf_id; idx <= end_vf_id; idx++) {
+		func_idx = hinic_glb_pf_vf_offset(hwdev) + idx;
+		hinic_set_wq_page_size(hwdev, func_idx, HINIC_HW_WQ_PAGE_SIZE);
+
+		hinic_clear_vf_infos(hwdev, idx);
+	}
+
+	return 0;
+}
+
+/* Allocate and initialize the nic_io context. A VF registers itself with
+ * its PF over the mailbox; a PF allocates per-VF bookkeeping, initializes
+ * each VF's link state and installs the VF mailbox dispatcher.
+ * Return: 0 on success, negative errno on failure (nic_io freed).
+ */
+int hinic_vf_func_init(struct hinic_hwdev *hwdev)
+{
+	struct hinic_nic_io *nic_io;
+	int err = 0;
+	struct hinic_register_vf register_info = {0};
+	u32 size;
+	u16 i, out_size = sizeof(register_info);
+
+	hwdev->nic_io = kzalloc(sizeof(*hwdev->nic_io), GFP_KERNEL);
+	if (!hwdev->nic_io)
+		return -ENOMEM;
+
+	nic_io = hwdev->nic_io;
+	nic_io->hwdev = hwdev;
+
+	sema_init(&nic_io->nic_cfg.cfg_lock, 1);
+
+	if (hinic_func_type(hwdev) == TYPE_VF) {
+		/* fixed HTML-entity corruption: "&reg;ister_info" was
+		 * mangled to the '(R)' character; must be &register_info
+		 */
+		err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+				       HINIC_PORT_CMD_VF_REGISTER,
+				       &register_info, sizeof(register_info),
+				       &register_info, &out_size, 0);
+		if (err || register_info.status || !out_size) {
+			nic_err(hwdev->dev_hdl, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
+				err, register_info.status, out_size);
+			hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+			err = -EIO;
+			goto out_free_nic_io;
+		}
+	} else {
+		nic_io->max_vfs = hinic_func_max_vf(hwdev);
+		size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs;
+		if (size != 0) {
+			nic_io->vf_infos = kzalloc(size, GFP_KERNEL);
+			if (!nic_io->vf_infos) {
+				err = -ENOMEM;
+				goto out_free_nic_io;
+			}
+
+			for (i = 0; i < nic_io->max_vfs; i++) {
+				err = hinic_init_vf_infos(nic_io, i);
+				if (err)
+					goto init_vf_infos_err;
+			}
+
+			err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+							nic_pf_mbox_handler);
+			if (err)
+				goto register_pf_mbox_cb_err;
+		}
+	}
+
+	return 0;
+
+register_pf_mbox_cb_err:
+init_vf_infos_err:
+	kfree(nic_io->vf_infos);
+
+out_free_nic_io:
+	kfree(hwdev->nic_io);
+	hwdev->nic_io = NULL;
+
+	return err;
+}
+
+/* Tear down the per-device NIC I/O context allocated by hinic_vf_func_init().
+ * On a VF this unregisters from the PF over the mailbox (failure is only
+ * logged); on a PF it removes the PF mailbox handler and frees vf_infos.
+ * Finally frees nic_io and resets the pointer.
+ */
+void hinic_vf_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_register_vf unregister = {0};
+ u16 out_size = sizeof(unregister);
+ int err;
+
+ if (hinic_func_type(hwdev) == TYPE_VF) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_UNREGISTER,
+ &unregister, sizeof(unregister),
+ &unregister, &out_size, 0);
+ if (err || !out_size || unregister.status)
+ nic_err(hwdev->dev_hdl, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, unregister.status, out_size);
+ } else {
+ if (hwdev->nic_io->vf_infos) {
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ kfree(hwdev->nic_io->vf_infos);
+ }
+ }
+
+ kfree(hwdev->nic_io);
+ hwdev->nic_io = NULL;
+}
+
+/*lint -save -e734*/
+/* Set (or delete, when mac_addr is all-zero) the PF-administered MAC of a VF.
+ * This function must only be called from hinic_ndo_set_vf_mac(); other
+ * callers are not permitted.
+ * @vf is the 1-based HW VF id. Returns 0 on success (including the no-op
+ * duplicate-request case) or the error from the MAC update/delete.
+ */
+int hinic_set_vf_mac(void *hwdev, int vf, unsigned char *mac_addr)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+ int del_vf_mac = is_zero_ether_addr(mac_addr);
+ u16 func_id;
+ int err;
+
+ /* duplicate request, so just return success */
+ if (!memcmp(vf_info->vf_mac_addr, mac_addr, ETH_ALEN))
+ return 0;
+
+ func_id = hinic_glb_pf_vf_offset(hw_dev) + vf;
+ if (del_vf_mac)
+ err = hinic_del_mac(hwdev, vf_info->vf_mac_addr, 0, func_id);
+ else
+ err = hinic_update_mac(hw_dev, vf_info->vf_mac_addr,
+ mac_addr, 0, func_id);
+ if (err)
+ return err;
+
+ memcpy(vf_info->vf_mac_addr, mac_addr, ETH_ALEN);
+ vf_info->pf_set_mac = !del_vf_mac;
+
+ return 0;
+}
+
+/* Add a PF-administered VLAN/QoS pair on a VF and record it in vf_infos.
+ * @vf_id is the 1-based HW VF id. Returns 0 on success or the error from
+ * hinic_set_vf_vlan().
+ */
+int hinic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hw_dev, true, vlan, qos, vf_id);
+ if (err)
+ return err;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos;
+
+ nic_info(hw_dev->dev_hdl, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan, qos, HW_VF_ID_TO_OS(vf_id));
+ return 0;
+}
+
+/* Remove the PF-administered VLAN/QoS from a VF and reset the cached values
+ * to 0. @vf_id is the 1-based HW VF id. Returns 0 on success or the error
+ * from hinic_set_vf_vlan().
+ */
+int hinic_kill_vf_vlan(void *hwdev, int vf_id)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hw_dev, false,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos,
+ vf_id);
+ if (err)
+ return err;
+
+ nic_info(hw_dev->dev_hdl, "Remove VLAN %d on VF %d\n",
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ HW_VF_ID_TO_OS(vf_id));
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0;
+
+ return 0;
+}
+
+/* Return the cached VLAN TCI for a VF: PF-set VLAN id combined with the
+ * PF-set QoS in the 802.1Q priority bits.
+ */
+u16 hinic_vf_info_vlanprio(void *hwdev, int vf_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ u16 pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan;
+ u8 pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos;
+ u16 vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT;
+
+ return vlanprio;
+}
+
+/*lint -restore*/
+
+/* Return true if the given VF (1-based HW id) has registered with the PF. */
+bool hinic_vf_is_registered(void *hwdev, u16 vf_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+ return nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered;
+}
+
+/* Fill an ifla_vf_info from the driver's cached VF state (MAC, VLAN/QoS,
+ * spoof-check, trust, rate limits and forced link state) for the
+ * .ndo_get_vf_config path. @vf_id is the 1-based HW VF id.
+ */
+void hinic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct vf_data_storage *vfinfo;
+
+ vfinfo = hw_dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+ ivi->vf = HW_VF_ID_TO_OS(vf_id);
+ memcpy(ivi->mac, vfinfo->vf_mac_addr, ETH_ALEN);
+ ivi->vlan = vfinfo->pf_vlan;
+ ivi->qos = vfinfo->pf_qos;
+ ivi->spoofchk = vfinfo->spoofchk;
+ ivi->trusted = vfinfo->trust;
+
+ ivi->max_tx_rate = vfinfo->max_rate;
+ ivi->min_tx_rate = vfinfo->min_rate;
+
+ if (!vfinfo->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vfinfo->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+}
+
+/* Undo every PF-administered setting on a VF (MAC, VLAN, rate limit,
+ * spoof-check, trust), then zero its vf_infos entry and re-seed it with
+ * defaults via hinic_init_vf_infos(). @vf_id is the 1-based HW VF id.
+ */
+void hinic_clear_vf_infos(void *hwdev, u16 vf_id)
+{
+ struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev;
+ struct vf_data_storage *vf_infos;
+ u16 func_id;
+
+ func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ vf_infos = hw_dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_infos->pf_set_mac)
+ hinic_del_mac(hwdev, vf_infos->vf_mac_addr, 0, func_id);
+
+ if (hinic_vf_info_vlanprio(hwdev, vf_id))
+ hinic_kill_vf_vlan(hwdev, vf_id);
+
+ if (vf_infos->max_rate)
+ hinic_set_vf_tx_rate(hwdev, vf_id, 0, 0);
+
+ if (vf_infos->spoofchk)
+ hinic_set_vf_spoofchk(hwdev, vf_id, false);
+
+ if (vf_infos->trust)
+ hinic_set_vf_trust(hwdev, vf_id, false);
+
+ memset(vf_infos, 0, sizeof(*vf_infos));
+ /* set vf_infos to default */
+ hinic_init_vf_infos(hw_dev->nic_io, HW_VF_ID_TO_OS(vf_id));
+}
+
+/* Report a link state change to one VF over the mailbox; silently skipped
+ * if the VF has not registered. Mailbox failure is only logged.
+ */
+static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id,
+ u8 link_status)
+{
+ struct hinic_port_link_status link = {0};
+ struct vf_data_storage *vf_infos = hwdev->nic_io->vf_infos;
+ u16 out_size = sizeof(link);
+ int err;
+
+ if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) {
+ link.link = link_status;
+ link.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
+ vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT,
+ &link, sizeof(link),
+ &link, &out_size, 0);
+ if (err || !out_size || link.status)
+ nic_err(hwdev->dev_hdl,
+ "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err,
+ link.status, out_size);
+ }
+}
+
+/* Cache the new PF link state, then send a link change event mbox msg to
+ * every active VF under the PF whose link state is not forced by the admin.
+ * VF ids are 1-based, hence the loop from 1 to max_vfs inclusive.
+ */
+void hinic_notify_all_vfs_link_changed(void *hwdev, u8 link_status)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ u16 i;
+
+ nic_io->link_status = link_status;
+ for (i = 1; i <= nic_io->max_vfs; i++) {
+ if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced)
+ hinic_notify_vf_link_status(nic_io->hwdev, i,
+ link_status);
+ }
+}
+
+/* Cache the PF link state without notifying any VF. */
+void hinic_save_pf_link_status(void *hwdev, u8 link_status)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+ nic_io->link_status = link_status;
+}
+
+/* Apply an IFLA_VF_LINK_STATE-style setting (@link) to a VF: AUTO follows
+ * the cached PF link state, ENABLE/DISABLE force the link up/down. The VF
+ * is then notified of the resulting state. Returns 0 or -EINVAL for an
+ * unknown @link value.
+ */
+int hinic_set_vf_link_state(void *hwdev, u16 vf_id, int link)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+ u8 link_status = 0;
+
+ switch (link) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ?
+ true : false;
+ link_status = nic_io->link_status;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true;
+ link_status = HINIC_LINK_UP;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false;
+ link_status = HINIC_LINK_DOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Notify the VF of its new link state */
+ hinic_notify_vf_link_status(hwdev, vf_id, link_status);
+
+ return 0;
+}
+
+/* Enable/disable anti-spoof checking on a VF via the management channel and
+ * cache the setting. Returns 0 on success, HINIC_MGMT_CMD_UNSUPPORTED when
+ * the firmware lacks the command, or -EINVAL on other failures.
+ * NOTE(review): the cached spoofchk flag is updated even on failure —
+ * presumably intentional so it matches the admin's request; confirm.
+ */
+int hinic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io = NULL;
+ struct hinic_spoofchk_set spoofchk_cfg = {0};
+ struct vf_data_storage *vf_infos = NULL;
+ u16 out_size = sizeof(spoofchk_cfg);
+ int err = 0;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_io = hw_dev->nic_io;
+ vf_infos = nic_io->vf_infos;
+
+ spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ spoofchk_cfg.state = spoofchk ? 1 : 0;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK,
+ &spoofchk_cfg,
+ sizeof(spoofchk_cfg), &spoofchk_cfg,
+ &out_size, 0);
+ if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || spoofchk_cfg.status) {
+ nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status,
+ out_size);
+ err = -EINVAL;
+ }
+
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk;
+
+ return err;
+}
+
+/* Record the admin "trust" flag for a VF. This is driver-side state only;
+ * no firmware command is issued here. Returns 0, or -EINVAL if hwdev is
+ * NULL.
+ */
+int hinic_set_vf_trust(void *hwdev, u16 vf_id, bool trust)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io = NULL;
+ struct vf_data_storage *vf_infos = NULL;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_io = hw_dev->nic_io;
+ vf_infos = nic_io->vf_infos;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].trust = trust;
+
+ return 0;
+}
+
+/* Return the cached spoof-check flag for a VF (1-based HW id). */
+bool hinic_vf_info_spoofchk(void *hwdev, int vf_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ bool spoofchk = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk;
+
+ return spoofchk;
+}
+
+/* Return the cached trust flag for a VF (1-based HW id). */
+bool hinic_vf_info_trust(void *hwdev, int vf_id)
+{
+ struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+ bool trust = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust;
+
+ return trust;
+}
+
+/* Legacy single-rate fallback: program only a max TX rate for a VF via
+ * HINIC_PORT_CMD_SET_VF_RATE and cache max_rate/min_rate (min forced to 0).
+ * Returns 0 on success, the firmware status, or -EIO.
+ */
+static int hinic_set_vf_rate_limit(void *hwdev, u16 vf_id, u32 tx_rate)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ struct hinic_tx_rate_cfg rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ rate_cfg.tx_rate = tx_rate;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_SET_VF_RATE, &rate_cfg,
+ sizeof(rate_cfg), &rate_cfg,
+ &out_size, 0);
+ if (err || !out_size || rate_cfg.status) {
+ nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status,
+ out_size);
+ if (rate_cfg.status)
+ return rate_cfg.status;
+
+ return -EIO;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0;
+
+ return 0;
+}
+
+/* Program both max and min TX rates for a VF. Returns 0 on success,
+ * HINIC_MGMT_CMD_UNSUPPORTED when the firmware lacks the command (so the
+ * caller can fall back to hinic_set_vf_rate_limit()), or -EIO on failure.
+ * The cached rates are updated only when the firmware accepted the command.
+ */
+static int hinic_set_vf_tx_rate_max_min(void *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io = hw_dev->nic_io;
+ struct hinic_tx_rate_cfg_max_min rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id;
+ rate_cfg.max_rate = max_rate;
+ rate_cfg.min_rate = min_rate;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size, 0);
+ if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ rate_cfg.status) || err || !out_size) {
+ nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err,
+ rate_cfg.status, out_size);
+ return -EIO;
+ }
+
+ if (!rate_cfg.status) {
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate;
+ }
+
+ return rate_cfg.status;
+}
+
+/* Set VF TX rates, preferring the max/min firmware command and falling back
+ * to the single-rate limit when unsupported. A non-zero min_rate cannot be
+ * honored by the fallback and yields -EINVAL.
+ */
+int hinic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ int err;
+
+ err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED)
+ return err;
+
+ if (min_rate) {
+ nic_err(hw_dev->dev_hdl, "Current firmware don't support to set min tx rate\n");
+ return -EINVAL;
+ }
+
+ nic_info(hw_dev->dev_hdl, "Current firmware don't support to set min tx rate, force min_tx_rate = max_tx_rate\n");
+
+ return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate);
+}
+
+/* Propagate a new DCB state: save it in the SDK, raise the PF-side event,
+ * then push it to every registered VF over the mailbox (failures per-VF are
+ * only logged). A no-op when the state is unchanged. Returns 0, or -EINVAL
+ * for NULL arguments.
+ */
+int hinic_set_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io;
+ struct vf_data_storage *vf_infos;
+ struct hinic_vf_dcb_state vf_dcb = {0};
+ u16 vf_id, out_size = 0;
+ int err;
+
+ if (!hwdev || !dcb_state || !hw_dev->nic_io)
+ return -EINVAL;
+
+ nic_io = hw_dev->nic_io;
+ if (!memcmp(&nic_io->dcb_state, dcb_state, sizeof(nic_io->dcb_state)))
+ return 0;
+
+ memcpy(&vf_dcb.state, dcb_state, sizeof(vf_dcb.state));
+ /* save in sdk, vf will get dcb state when probing */
+ hinic_save_dcb_state(hwdev, dcb_state);
+
+ /* notify stateful services on the pf, then notify all vfs */
+ hinic_notify_dcb_state_event(hwdev, dcb_state);
+
+ /* vf not supported, no need to notify vf */
+ if (!nic_io->vf_infos)
+ return 0;
+
+ vf_infos = nic_io->vf_infos;
+ for (vf_id = 0; vf_id < nic_io->max_vfs; vf_id++) {
+ if (vf_infos[vf_id].registered) {
+ vf_dcb.status = 0;
+ out_size = sizeof(vf_dcb);
+ err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
+ OS_VF_ID_TO_HW(vf_id),
+ HINIC_PORT_CMD_SET_VF_COS,
+ &vf_dcb, sizeof(vf_dcb), &vf_dcb,
+ &out_size, 0);
+ if (err || vf_dcb.status || !out_size)
+ nic_err(hw_dev->dev_hdl,
+ "Failed to notify dcb state to VF %d, err: %d, status: 0x%x, out size: 0x%x\n",
+ vf_id, err, vf_dcb.status, out_size);
+ }
+ }
+
+ return 0;
+}
+
+/* Copy the cached DCB state into @dcb_state. Returns 0, or -EINVAL for
+ * NULL arguments.
+ */
+int hinic_get_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_nic_io *nic_io;
+
+ if (!hwdev || !dcb_state)
+ return -EINVAL;
+
+ nic_io = hw_dev->nic_io;
+ memcpy(dcb_state, &nic_io->dcb_state, sizeof(*dcb_state));
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_dcb_state);
+
+/* Cache @dcb_state in the device's nic_io context. Returns 0, or -EINVAL
+ * when hwdev, dcb_state or nic_io is NULL.
+ */
+int hinic_save_dcb_state(struct hinic_hwdev *hwdev,
+ struct hinic_dcb_state *dcb_state)
+{
+ struct hinic_nic_io *nic_io;
+
+ if (!hwdev || !dcb_state)
+ return -EINVAL;
+
+ if (!hwdev->nic_io)
+ return -EINVAL;
+
+ nic_io = hwdev->nic_io;
+ memcpy(&nic_io->dcb_state, dcb_state, sizeof(*dcb_state));
+
+ return 0;
+}
+
+/* VF-only: fetch the PF's DCB state over the management channel, copy it
+ * into @dcb_state, and cache it locally. Returns 0 on success, -EINVAL for
+ * bad arguments or when called on a non-VF, -EFAULT on command failure.
+ */
+int hinic_get_pf_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_vf_dcb_state vf_dcb = {0};
+ u16 out_size = sizeof(vf_dcb);
+ int err;
+
+ if (!hwdev || !dcb_state)
+ return -EINVAL;
+
+ if (hinic_func_type(hwdev) != TYPE_VF) {
+ nic_err(hw_dev->dev_hdl, "Only vf need to get pf dcb state\n");
+ return -EINVAL;
+ }
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_GET_VF_COS, &vf_dcb,
+ sizeof(vf_dcb), &vf_dcb,
+ &out_size, 0);
+ if (err || !out_size || vf_dcb.status) {
+ nic_err(hw_dev->dev_hdl, "Failed to get vf default cos, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vf_dcb.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(dcb_state, &vf_dcb.state, sizeof(*dcb_state));
+ /* save dcb_state in hw for stateful modules */
+ hinic_save_dcb_state(hwdev, dcb_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_get_pf_dcb_state);
+
+/* Program an IPSU MAC table entry (@index) with the given MAC/VLAN/function
+ * triple via the L2NIC management channel. Returns 0 on success or -EINVAL
+ * on bad arguments or command failure.
+ */
+int hinic_set_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_ipsu_mac mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !mac_addr)
+ return -EINVAL;
+
+ mac_info.index = index;
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_IPSU_MAC,
+ &mac_info, sizeof(mac_info), &mac_info,
+ &out_size);
+ if (err || !out_size || mac_info.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to set IPSU MAC(index %d), err: %d, status: 0x%x, out size: 0x%x\n",
+ index, err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Read the IPSU MAC table entry at @index, returning its MAC, VLAN id and
+ * function id through the out parameters. Returns 0 on success or -EINVAL
+ * on bad arguments or command failure.
+ */
+int hinic_get_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 *vlan_id,
+ u16 *func_id)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_ipsu_mac mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !mac_addr)
+ return -EINVAL;
+
+ mac_info.index = index;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_IPSU_MAC,
+ &mac_info, sizeof(mac_info), &mac_info,
+ &out_size);
+ if (err || !out_size || mac_info.status) {
+ nic_err(nic_hwdev->dev_hdl,
+ "Failed to get IPSU MAC(index %d), err: %d, status: 0x%x, out size: 0x%x\n",
+ index, err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+ *func_id = mac_info.func_id;
+ *vlan_id = mac_info.vlan_id;
+ memcpy(mac_addr, mac_info.mac, ETH_ALEN);
+
+ return 0;
+}
+
+/* Enable/disable the port anti-attack rate limiter using the driver's
+ * default CIR/XIR/CBS/XBS parameters. Returns 0 on success or -EINVAL on
+ * failure.
+ */
+int hinic_set_anti_attack(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_port_anti_attack_rate rate = {0};
+ u16 out_size = sizeof(rate);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &rate.func_id);
+ if (err)
+ return err;
+
+ rate.enable = enable;
+ rate.cir = ANTI_ATTACK_DEFAULT_CIR;
+ rate.xir = ANTI_ATTACK_DEFAULT_XIR;
+ rate.cbs = ANTI_ATTACK_DEFAULT_CBS;
+ rate.xbs = ANTI_ATTACK_DEFAULT_XBS;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE,
+ &rate, sizeof(rate), &rate,
+ &out_size);
+ if (err || !out_size || rate.status) {
+ nic_err(nic_hwdev->dev_hdl, "Can't %s port Anti-Attack rate limit err: %d, status: 0x%x, out size: 0x%x\n",
+ (enable ? "enable" : "disable"), err, rate.status,
+ out_size);
+ return -EINVAL;
+ }
+
+ nic_info(nic_hwdev->dev_hdl, "%s port Anti-Attack rate limit succeed\n",
+ (enable ? "Enable" : "Disable"));
+
+ return 0;
+}
+
+/* Ask the firmware to clear this function's send-queue resources. Returns
+ * 0 on success or -EINVAL on failure.
+ */
+int hinic_flush_sq_res(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_clear_sq_resource sq_res = {0};
+ u16 out_size = sizeof(sq_res);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &sq_res.func_id);
+ if (err)
+ return err;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAR_SQ_RES,
+ &sq_res, sizeof(sq_res), &sq_res,
+ &out_size);
+ if (err || !out_size || sq_res.status) {
+ nic_err(dev->dev_hdl, "Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, sq_res.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_flush_sq_res);
+
+static int __set_pf_bw(struct hinic_hwdev *hwdev, u8 speed_level);
+
+/* Re-apply the cached NIC configuration after a port event: PFC when it is
+ * enabled (PFC overrides pause), otherwise pause settings, and the PF
+ * bandwidth limit when rate limiting is supported. Serialized by the
+ * cfg_lock semaphore; individual failures are logged and the last error is
+ * returned.
+ */
+int hinic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_nic_cfg *nic_cfg = &dev->nic_io->nic_cfg;
+ int err = 0;
+
+ down(&nic_cfg->cfg_lock);
+
+ /* Enable PFC will disable pause */
+ if (nic_cfg->pfc_en) {
+ err = hinic_dcb_set_hw_pfc(hwdev, nic_cfg->pfc_en,
+ nic_cfg->pfc_bitmap);
+ if (err)
+ nic_err(dev->dev_hdl, "Failed to set pfc\n");
+
+ } else if (!port_info->autoneg_state || nic_cfg->pause_set) {
+ nic_cfg->nic_pause.auto_neg = port_info->autoneg_state;
+ err = hinic_set_hw_pause_info(hwdev, nic_cfg->nic_pause);
+ if (err)
+ nic_err(dev->dev_hdl, "Failed to set pause\n");
+ }
+
+ if (FUNC_SUPPORT_RATE_LIMIT(hwdev)) {
+ err = __set_pf_bw(hwdev, port_info->speed);
+ if (err)
+ nic_err(dev->dev_hdl, "Failed to set pf bandwidth limit\n");
+ }
+
+ up(&nic_cfg->cfg_lock);
+
+ return err;
+}
+
+/* Enable/disable the super CQE feature for this function. Returns 0 on
+ * success or -EINVAL on failure.
+ */
+int hinic_set_super_cqe_state(void *hwdev, bool enable)
+{
+ struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+ struct hinic_super_cqe super_cqe = {0};
+ u16 out_size = sizeof(super_cqe);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ err = hinic_global_func_id_get(hwdev, &super_cqe.func_id);
+ if (err)
+ return err;
+
+ super_cqe.super_cqe_en = enable;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_SUPER_CQE,
+ &super_cqe, sizeof(super_cqe), &super_cqe,
+ &out_size);
+ if (err || !out_size || super_cqe.status) {
+ nic_err(nic_hwdev->dev_hdl, "Can't %s surper cqe, err: %d, status: 0x%x, out size: 0x%x\n",
+ (enable ? "enable" : "disable"), err, super_cqe.status,
+ out_size);
+ return -EINVAL;
+ }
+
+ nic_info(nic_hwdev->dev_hdl, "%s super cqe succeed\n",
+ (enable ? "Enable" : "Disable"));
+
+ return 0;
+}
+
+/* Enable/disable routine port command reporting from the firmware. A
+ * firmware that lacks the command is tolerated (logged, returns 0); other
+ * failures return -EFAULT.
+ */
+int hinic_set_port_routine_cmd_report(void *hwdev, bool enable)
+{
+ struct hinic_port_rt_cmd rt_cmd = { 0 };
+ struct hinic_hwdev *dev = hwdev;
+ u16 out_size = sizeof(rt_cmd);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ rt_cmd.pf_id = (u8)hinic_global_func_id(hwdev);
+ rt_cmd.enable = enable;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev,
+ HINIC_PORT_CMD_SET_PORT_REPORT,
+ &rt_cmd, sizeof(rt_cmd), &rt_cmd,
+ &out_size);
+ if (rt_cmd.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ nic_info(dev->dev_hdl, "Current firmware doesn't support to set port routine command report\n");
+ } else if (rt_cmd.status || err || !out_size) {
+ nic_err(dev->dev_hdl,
+ "Failed to set port routine command report, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rt_cmd.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Enable/disable packet capture (ucapture) for a given function. Returns 0
+ * on success or -EINVAL on failure.
+ */
+int hinic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_capture_info cap_info = {0};
+ u16 out_size = sizeof(cap_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ cap_info.op_type = 2; /* function capture */
+ cap_info.is_en_trx = cap_en;
+ cap_info.func_id = func_id;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_UCAPTURE_OPT,
+ &cap_info, sizeof(cap_info),
+ &cap_info, &out_size);
+ if (err || !out_size || cap_info.status) {
+ nic_err(dev->dev_hdl,
+ "Failed to set function capture attr, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, cap_info.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic_set_func_capture_en);
+
+/* Force the physical port to drop TX packets. An unsupported firmware
+ * status is passed back to the caller; other failures return -EFAULT.
+ */
+int hinic_force_drop_tx_pkt(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_force_pkt_drop pkt_drop = {0};
+ u16 out_size = sizeof(pkt_drop);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ pkt_drop.port = hinic_physical_port_id(hwdev);
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_FORCE_PKT_DROP,
+ &pkt_drop, sizeof(pkt_drop),
+ &pkt_drop, &out_size);
+ if ((pkt_drop.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ pkt_drop.status) || err || !out_size) {
+ nic_err(dev->dev_hdl,
+ "Failed to set force tx packets drop, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pkt_drop.status, out_size);
+ return -EFAULT;
+ }
+
+ return pkt_drop.status;
+}
+
+/* Link speed in Mbps indexed by the firmware speed level
+ * (10M..100G; must stay in sync with LINK_SPEED_LEVELS).
+ */
+u32 hw_speed_convert[LINK_SPEED_LEVELS] = {
+ 10, 100, 1000, 10000,
+ 25000, 40000, 100000
+};
+
+/* Program the PF bandwidth limit as an absolute rate derived from the
+ * current link speed level and the configured percentage (pf_bw_limit).
+ * 100%% means unlimited (rate 0); a non-zero percentage that rounds to 0 is
+ * clamped to 1. Returns 0 on success, the firmware status, or -EIO.
+ */
+static int __set_pf_bw(struct hinic_hwdev *hwdev, u8 speed_level)
+{
+ struct hinic_nic_cfg *nic_cfg = &hwdev->nic_io->nic_cfg;
+ struct hinic_tx_rate_cfg rate_cfg = {0};
+ u32 pf_bw = 0;
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ if (speed_level >= LINK_SPEED_LEVELS) {
+ nic_err(hwdev->dev_hdl, "Invalid speed level: %d\n",
+ speed_level);
+ return -EINVAL;
+ }
+
+ if (nic_cfg->pf_bw_limit == 100) {
+ pf_bw = 0; /* unlimit bandwidth */
+ } else {
+ pf_bw = (hw_speed_convert[speed_level] / 100) *
+ nic_cfg->pf_bw_limit;
+ /* bandwidth limit is very small but not unlimit in this case */
+ if (pf_bw == 0)
+ pf_bw = 1;
+ }
+
+ err = hinic_global_func_id_get(hwdev, &rate_cfg.func_id);
+ if (err)
+ return err;
+
+ rate_cfg.tx_rate = pf_bw;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_SET_VF_RATE, &rate_cfg,
+ sizeof(rate_cfg), &rate_cfg,
+ &out_size, 0);
+ if (err || !out_size || rate_cfg.status) {
+ nic_err(hwdev->dev_hdl, "Failed to set rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ pf_bw, err, rate_cfg.status, out_size);
+ if (rate_cfg.status)
+ return rate_cfg.status;
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Re-apply the PF bandwidth limit for the current link speed. No-op on a
+ * VF or when rate limiting is unsupported. Returns 0 on success, -EIO when
+ * the port info can't be read, or the error from __set_pf_bw().
+ */
+int hinic_update_pf_bw(void *hwdev)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct nic_port_info port_info = {0};
+ int err;
+
+ if (hinic_func_type(hwdev) == TYPE_VF ||
+ !(FUNC_SUPPORT_RATE_LIMIT(hwdev)))
+ return 0;
+
+ err = hinic_get_port_info(hwdev, &port_info);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to get port info\n");
+ return -EIO;
+ }
+
+ err = __set_pf_bw(hwdev, port_info.speed);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to set pf bandwidth\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Set the PF bandwidth limit as a percentage (0-100) of link speed and push
+ * it to firmware. Requires link up; rolls back the cached percentage if the
+ * firmware update fails. No-op (returns 0) on a VF.
+ */
+int hinic_set_pf_bw_limit(void *hwdev, u32 bw_limit)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_nic_cfg *nic_cfg;
+ u32 old_bw_limit;
+ u8 link_state = 0;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (hinic_func_type(hwdev) == TYPE_VF)
+ return 0;
+
+ if (bw_limit > 100) {
+ nic_err(dev->dev_hdl, "Invalid bandwidth: %d\n", bw_limit);
+ return -EINVAL;
+ }
+
+ err = hinic_get_link_state(hwdev, &link_state);
+ if (err) {
+ nic_err(dev->dev_hdl, "Failed to get link state\n");
+ return -EIO;
+ }
+
+ if (!link_state) {
+ nic_err(dev->dev_hdl, "Link status must be up when set pf tx rate\n");
+ return -EINVAL;
+ }
+
+ nic_cfg = &dev->nic_io->nic_cfg;
+ old_bw_limit = nic_cfg->pf_bw_limit;
+ nic_cfg->pf_bw_limit = bw_limit;
+
+ err = hinic_update_pf_bw(hwdev);
+ if (err) {
+ nic_cfg->pf_bw_limit = old_bw_limit;
+ return err;
+ }
+
+ return 0;
+}
+
+/* Configure whether the function's link status follows the physical port
+ * status. An unsupported firmware status is passed back to the caller;
+ * other failures return -EFAULT.
+ */
+int hinic_set_link_status_follow(void *hwdev,
+ enum hinic_link_follow_status status)
+{
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_set_link_follow follow = {0};
+ u16 out_size = sizeof(follow);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ if (status >= HINIC_LINK_FOLLOW_STATUS_MAX) {
+ nic_err(dev->dev_hdl,
+ "Invalid link follow status: %d\n", status);
+ return -EINVAL;
+ }
+
+ err = hinic_global_func_id_get(hwdev, &follow.func_id);
+ if (err)
+ return err;
+
+ follow.follow_status = status;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LINK_FOLLOW,
+ &follow, sizeof(follow), &follow,
+ &out_size);
+ if ((follow.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ follow.status) || err || !out_size) {
+ nic_err(dev->dev_hdl,
+ "Failed to set link status follow port status, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, follow.status, out_size);
+ return -EFAULT;
+ }
+
+ return follow.status;
+}
+EXPORT_SYMBOL(hinic_set_link_status_follow);
+
+/* HILINK module */
+
+#define HINIC_MGMT_DEFAULT_SIZE 1
+
+/* Wrapper around hinic_msg_to_mgmt_sync() for the HILINK module: when the
+ * reply is a single byte it is treated as "command unsupported" and the
+ * first byte of buf_out is rewritten to HINIC_MGMT_CMD_UNSUPPORTED so
+ * callers can detect it uniformly.
+ */
+static int __hilink_msg_to_mgmt_sync(void *hwdev, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size,
+ u32 timeout)
+{
+ int err;
+
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_HILINK, cmd, buf_in,
+ in_size, buf_out, out_size, timeout);
+ if (err)
+ return err;
+
+ if (*out_size == HINIC_MGMT_DEFAULT_SIZE && buf_out)
+ *((u8 *)(buf_out)) = HINIC_MGMT_CMD_UNSUPPORTED;
+
+ return 0;
+}
+
+/* Query extended link information from the HILINK module for the physical
+ * port. On success @info is filled; an unsupported command is logged and
+ * its status returned; other failures return -EFAULT.
+ */
+int hinic_get_hilink_link_info(void *hwdev, struct hinic_link_info *info)
+{
+ struct hinic_hilink_link_info link_info = {0};
+ u16 out_size = sizeof(link_info);
+ int err;
+
+ link_info.port_id = hinic_physical_port_id(hwdev);
+
+ err = __hilink_msg_to_mgmt_sync(hwdev, HINIC_HILINK_CMD_GET_LINK_INFO,
+ &link_info, sizeof(link_info),
+ &link_info, &out_size, 0);
+ if ((link_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ link_info.status) || err || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get hilink info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_info.status, out_size);
+ return -EFAULT;
+ }
+
+ if (!link_info.status)
+ memcpy(info, &link_info.info, sizeof(*info));
+ else if (link_info.status == HINIC_MGMT_CMD_UNSUPPORTED)
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupported command: mod: %d, cmd: %d\n",
+ HINIC_MOD_HILINK, HINIC_HILINK_CMD_GET_LINK_INFO);
+
+ return link_info.status;
+}
+
+/* Push ethtool-style link settings (autoneg/speed/FEC, selected by
+ * valid_bitmap) to the HILINK module. An unsupported firmware status is
+ * passed back to the caller; other failures return -EFAULT.
+ */
+int hinic_set_link_settings(void *hwdev, struct hinic_link_ksettings *settings)
+{
+ struct hinic_link_ksettings_info info = {0};
+ u16 out_size = sizeof(info);
+ int err;
+
+ err = hinic_global_func_id_get(hwdev, &info.func_id);
+ if (err)
+ return err;
+
+ info.valid_bitmap = settings->valid_bitmap;
+ info.autoneg = settings->autoneg;
+ info.speed = settings->speed;
+ info.fec = settings->fec;
+
+ err = __hilink_msg_to_mgmt_sync(hwdev,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS,
+ &info, sizeof(info),
+ &info, &out_size, 0);
+ if ((info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ info.status) || err || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, info.status, out_size);
+ return -EFAULT;
+ }
+
+ return info.status;
+}
+
+/* Disable TX promiscuous mode for this function. A firmware that lacks the
+ * command is tolerated (logged, returns 0); other failures return -EFAULT.
+ */
+int hinic_disable_tx_promisc(void *hwdev)
+{
+ struct hinic_promsic_info info = {0};
+ u16 out_size = sizeof(info);
+ int err;
+
+ err = hinic_global_func_id_get(hwdev, &info.func_id);
+ if (err)
+ return err;
+
+ info.cfg = HINIC_TX_PROMISC_DISABLE;
+ err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_DISABLE_PROMISC, &info,
+ sizeof(info), &info, &out_size, 0);
+ if (err || !out_size || info.status) {
+ if (info.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Unsupported to disable TX promisc\n");
+ return 0;
+ }
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to disable multihost promisc, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, info.status, out_size);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/* Return true when no SFP module is present on the physical port. Uses the
+ * event-cached absent status (under sfp_mutex) when valid; otherwise asks
+ * the firmware. On query failure the module is conservatively reported
+ * absent (true).
+ */
+static bool hinic_if_sfp_absent(void *hwdev)
+{
+ struct card_node *chip_node = ((struct hinic_hwdev *)hwdev)->chip_node;
+ struct hinic_port_routine_cmd *rt_cmd;
+ struct hinic_cmd_get_light_module_abs sfp_abs = {0};
+ u8 port_id = hinic_physical_port_id(hwdev);
+ u16 out_size = sizeof(sfp_abs);
+ int err;
+ bool sfp_abs_valid;
+ bool sfp_abs_status;
+
+ rt_cmd = &chip_node->rt_cmd[port_id];
+ mutex_lock(&chip_node->sfp_mutex);
+ sfp_abs_valid = rt_cmd->up_send_sfp_abs;
+ sfp_abs_status = (bool)rt_cmd->abs.abs_status;
+ if (sfp_abs_valid) {
+ mutex_unlock(&chip_node->sfp_mutex);
+ return sfp_abs_status;
+ }
+ mutex_unlock(&chip_node->sfp_mutex);
+
+ sfp_abs.port_id = port_id;
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_SFP_ABS,
+ &sfp_abs, sizeof(sfp_abs), &sfp_abs,
+ &out_size);
+ if (sfp_abs.status || err || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get port%d sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n",
+ port_id, err, sfp_abs.status, out_size);
+ return true;
+ }
+
+ return ((sfp_abs.abs_status == 0) ? false : true);
+}
+
+/* Read the standard SFP EEPROM of the physical port into @data and report
+ * the valid length via @len (capped at STD_SFP_INFO_MAX_SIZE).
+ * NOTE(review): the memcpy always copies STD_SFP_INFO_MAX_SIZE bytes, so
+ * @data must be at least that large regardless of *len — confirm callers.
+ * Returns 0 on success, -ENXIO when no module is present, -EIO/-EINVAL on
+ * other failures.
+ */
+int hinic_get_sfp_eeprom(void *hwdev, u8 *data, u16 *len)
+{
+ struct hinic_cmd_get_std_sfp_info sfp_info = {0};
+ u8 port_id;
+ u16 out_size = sizeof(sfp_info);
+ int err;
+
+ if (!hwdev || !data || !len)
+ return -EINVAL;
+
+ port_id = hinic_physical_port_id(hwdev);
+ if (port_id >= HINIC_MAX_PORT_ID)
+ return -EINVAL;
+
+ if (hinic_if_sfp_absent(hwdev))
+ return -ENXIO;
+
+ sfp_info.port_id = port_id;
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_STD_SFP_INFO,
+ &sfp_info, sizeof(sfp_info), &sfp_info,
+ &out_size);
+ if (sfp_info.status || err || !out_size) {
+ nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+ "Failed to get port%d sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n",
+ port_id, err, sfp_info.status, out_size);
+ return -EIO;
+ }
+
+ *len = min_t(u16, sfp_info.eeprom_len, STD_SFP_INFO_MAX_SIZE);
+ memcpy(data, sfp_info.sfp_info, STD_SFP_INFO_MAX_SIZE);
+
+ return 0;
+}
+
+/* Return the first two SFP identifier bytes for the physical port. Uses
+ * the event-cached copy (under sfp_mutex) when available, otherwise reads
+ * the EEPROM. Returns 0 on success, -ENXIO when no module is present, or a
+ * negative error.
+ */
+int hinic_get_sfp_type(void *hwdev, u8 *data0, u8 *data1)
+{
+ struct card_node *chip_node = NULL;
+ struct hinic_port_routine_cmd *rt_cmd;
+ u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
+ u16 len;
+ u8 port_id;
+ int err;
+
+ if (!hwdev || !data0 || !data1)
+ return -EINVAL;
+
+ port_id = hinic_physical_port_id(hwdev);
+ if (port_id >= HINIC_MAX_PORT_ID)
+ return -EINVAL;
+
+ if (hinic_if_sfp_absent(hwdev))
+ return -ENXIO;
+
+ chip_node = ((struct hinic_hwdev *)hwdev)->chip_node;
+ rt_cmd = &chip_node->rt_cmd[port_id];
+ mutex_lock(&chip_node->sfp_mutex);
+ if (rt_cmd->up_send_sfp_info) {
+ *data0 = rt_cmd->sfp_info.sfp_qsfp_info[0];
+ *data1 = rt_cmd->sfp_info.sfp_qsfp_info[1];
+ mutex_unlock(&chip_node->sfp_mutex);
+ return 0;
+ }
+ mutex_unlock(&chip_node->sfp_mutex);
+
+ err = hinic_get_sfp_eeprom(hwdev, sfp_data, &len);
+ if (err)
+ return err;
+
+ *data0 = sfp_data[0];
+ *data1 = sfp_data[1];
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
new file mode 100644
index 000000000000..936184255dfd
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h
@@ -0,0 +1,638 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_CFG_H
+#define HINIC_CFG_H
+
+#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)
+#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)
+
+#define FW_SUPPORT_MAC_REUSE 0x1
+#define FW_SUPPORT_MAC_REUSE_FUNC(hwdev) \
+ ((hwdev)->fw_support_func_flag & FW_SUPPORT_MAC_REUSE)
+
+#define HINIC_VLAN_PRIORITY_SHIFT 13
+
+#define HINIC_RSS_INDIR_SIZE 256
+#define HINIC_DCB_TC_MAX 0x8
+#define HINIC_DCB_UP_MAX 0x8
+#define HINIC_DCB_COS_MAX 0x8
+#define HINIC_DCB_PG_MAX 0x8
+
+#define HINIC_DCB_TSA_TC_SP 2
+#define HINIC_DCB_TSA_TC_DWRR 0
+
+#define HINIC_RSS_KEY_SIZE 40
+
+#define HINIC_MAX_NUM_RQ 128
+
+#define HINIC_MIN_MTU_SIZE 256
+#define HINIC_MAX_JUMBO_FRAME_SIZE 9600
+
+#define HINIC_LRO_MAX_WQE_NUM_UPPER 32
+#define HINIC_LRO_MAX_WQE_NUM_LOWER 1
+#define HINIC_LRO_MAX_WQE_NUM_DEFAULT_ARM 4
+#define HINIC_LRO_MAX_WQE_NUM_DEFAULT_X86 8
+#define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8
+#define HINIC_LRO_WQE_NUM_PANGEA_DEFAULT 32
+
+#define HINIC_LRO_RX_TIMER_UPPER 1024
+#define HINIC_LRO_RX_TIMER_LOWER 1
+#define HINIC_LRO_RX_TIMER_DEFAULT 16
+#define HINIC_LRO_RX_TIMER_DEFAULT_25GE 16
+#define HINIC_LRO_RX_TIMER_DEFAULT_100GE 64
+#define HINIC_LRO_RX_TIMER_DEFAULT_PG_10GE 10
+#define HINIC_LRO_RX_TIMER_DEFAULT_PG_100GE 8
+
+#if defined(__aarch64__)
+#define HINIC_LOWEST_LATENCY 1
+#define HINIC_RX_RATE_LOW 400000
+#define HINIC_RX_COAL_TIME_LOW 20
+#define HINIC_RX_PENDING_LIMIT_LOW 2
+#define HINIC_RX_RATE_HIGH 1000000
+#define HINIC_RX_COAL_TIME_HIGH 225
+#define HINIC_RX_PENDING_LIMIT_HIGH 50
+#define HINIC_RX_RATE_THRESH 35000
+#define HINIC_TX_RATE_THRESH 35000
+#define HINIC_RX_RATE_LOW_VM 400000
+#define HINIC_RX_PENDING_LIMIT_HIGH_VM 50
+#else
+#define HINIC_LOWEST_LATENCY 1
+#define HINIC_RX_RATE_LOW 400000
+#define HINIC_RX_COAL_TIME_LOW 16
+#define HINIC_RX_PENDING_LIMIT_LOW 2
+#define HINIC_RX_RATE_HIGH 1000000
+#define HINIC_RX_COAL_TIME_HIGH 225
+#define HINIC_RX_PENDING_LIMIT_HIGH 8
+#define HINIC_RX_RATE_THRESH 50000
+#define HINIC_TX_RATE_THRESH 50000
+#define HINIC_RX_RATE_LOW_VM 100000
+#define HINIC_RX_PENDING_LIMIT_HIGH_VM 87
+#endif
+
+enum hinic_board_type {
+ HINIC_BOARD_UNKNOWN = 0,
+ HINIC_BOARD_10GE = 1,
+ HINIC_BOARD_25GE = 2,
+ HINIC_BOARD_40GE = 3,
+ HINIC_BOARD_100GE = 4,
+ HINIC_BOARD_PG_TP_10GE = 5,
+ HINIC_BOARD_PG_SM_25GE = 6,
+ HINIC_BOARD_PG_100GE = 7,
+};
+
+enum hinic_os_type {
+ HINIC_OS_UNKNOWN = 0,
+ HINIC_OS_HUAWEI = 1,
+ HINIC_OS_NON_HUAWEI = 2,
+};
+
+enum hinic_cpu_type {
+ HINIC_CPU_UNKNOWN = 0,
+ HINIC_CPU_X86_GENERIC = 1,
+ HINIC_CPU_ARM_GENERIC = 2,
+};
+
+struct hinic_adaptive_rx_cfg {
+ u32 lowest_lat;
+ u32 rate_low;
+ u32 coal_time_low;
+ u32 pending_limit_low;
+ u32 rate_high;
+ u32 coal_time_high;
+ u32 pending_limit_high;
+ u32 rate_thresh;
+};
+
+struct hinic_lro_cfg {
+ u32 enable;
+ u32 timer;
+ u32 buffer_size;
+};
+
+struct hinic_environment_info {
+ enum hinic_board_type board;
+ enum hinic_os_type os;
+ enum hinic_cpu_type cpu;
+};
+
+struct hinic_adaptive_cfg {
+ struct hinic_adaptive_rx_cfg adaptive_rx;
+ struct hinic_lro_cfg lro;
+};
+
+enum hinic_rss_hash_type {
+ HINIC_RSS_HASH_ENGINE_TYPE_XOR = 0,
+ HINIC_RSS_HASH_ENGINE_TYPE_TOEP,
+
+ HINIC_RSS_HASH_ENGINE_TYPE_MAX,
+};
+
+struct ifla_vf_info;
+struct hinic_dcb_state;
+
+struct nic_port_info {
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+};
+
+enum nic_media_type {
+ MEDIA_UNKNOWN = -1,
+ MEDIA_FIBRE = 0,
+ MEDIA_COPPER,
+ MEDIA_BACKPLANE
+};
+
+enum nic_speed_level {
+ LINK_SPEED_10MB = 0,
+ LINK_SPEED_100MB,
+ LINK_SPEED_1GB,
+ LINK_SPEED_10GB,
+ LINK_SPEED_25GB,
+ LINK_SPEED_40GB,
+ LINK_SPEED_100GB,
+ LINK_SPEED_LEVELS,
+};
+
+enum hinic_link_mode {
+ HINIC_10GE_BASE_KR = 0,
+ HINIC_40GE_BASE_KR4 = 1,
+ HINIC_40GE_BASE_CR4 = 2,
+ HINIC_100GE_BASE_KR4 = 3,
+ HINIC_100GE_BASE_CR4 = 4,
+ HINIC_25GE_BASE_KR_S = 5,
+ HINIC_25GE_BASE_CR_S = 6,
+ HINIC_25GE_BASE_KR = 7,
+ HINIC_25GE_BASE_CR = 8,
+ HINIC_GE_BASE_KX = 9,
+ HINIC_LINK_MODE_NUMBERS,
+
+ HINIC_SUPPORTED_UNKNOWN = 0xFFFF,
+};
+
+enum hinic_port_type {
+ HINIC_PORT_TP, /* BASET */
+ HINIC_PORT_AUI,
+ HINIC_PORT_MII,
+ HINIC_PORT_FIBRE, /* OPTICAL */
+ HINIC_PORT_BNC,
+ HINIC_PORT_ELEC,
+ HINIC_PORT_COPPER, /* PORT_DA */
+ HINIC_PORT_AOC,
+ HINIC_PORT_BACKPLANE,
+ HINIC_PORT_NONE = 0xEF,
+ HINIC_PORT_OTHER = 0xFF,
+};
+
+enum hinic_link_status {
+ HINIC_LINK_DOWN = 0,
+ HINIC_LINK_UP
+};
+
+struct nic_pause_config {
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
+struct nic_lro_info {
+ u16 func_id;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ u8 lro_max_wqe_num;
+ u8 lro_timer_en;
+ u32 lro_period;
+};
+
+struct nic_rss_type {
+ u8 tcp_ipv6_ext;
+ u8 ipv6_ext;
+ u8 tcp_ipv6;
+ u8 ipv6;
+ u8 tcp_ipv4;
+ u8 ipv4;
+ u8 udp_ipv6;
+ u8 udp_ipv4;
+};
+
+struct hinic_vport_stats {
+ u64 tx_unicast_pkts_vport;
+ u64 tx_unicast_bytes_vport;
+ u64 tx_multicast_pkts_vport;
+ u64 tx_multicast_bytes_vport;
+ u64 tx_broadcast_pkts_vport;
+ u64 tx_broadcast_bytes_vport;
+
+ u64 rx_unicast_pkts_vport;
+ u64 rx_unicast_bytes_vport;
+ u64 rx_multicast_pkts_vport;
+ u64 rx_multicast_bytes_vport;
+ u64 rx_broadcast_pkts_vport;
+ u64 rx_broadcast_bytes_vport;
+
+ u64 tx_discard_vport;
+ u64 rx_discard_vport;
+ u64 tx_err_vport;
+ u64 rx_err_vport;
+};
+
+struct hinic_phy_port_stats {
+ u64 mac_rx_total_pkt_num;
+ u64 mac_rx_total_oct_num;
+ u64 mac_rx_bad_pkt_num;
+ u64 mac_rx_bad_oct_num;
+ u64 mac_rx_good_pkt_num;
+ u64 mac_rx_good_oct_num;
+ u64 mac_rx_uni_pkt_num;
+ u64 mac_rx_multi_pkt_num;
+ u64 mac_rx_broad_pkt_num;
+
+ u64 mac_tx_total_pkt_num;
+ u64 mac_tx_total_oct_num;
+ u64 mac_tx_bad_pkt_num;
+ u64 mac_tx_bad_oct_num;
+ u64 mac_tx_good_pkt_num;
+ u64 mac_tx_good_oct_num;
+ u64 mac_tx_uni_pkt_num;
+ u64 mac_tx_multi_pkt_num;
+ u64 mac_tx_broad_pkt_num;
+
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undersize_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_64_oct_pkt_num;
+ u64 mac_rx_65_127_oct_pkt_num;
+ u64 mac_rx_128_255_oct_pkt_num;
+ u64 mac_rx_256_511_oct_pkt_num;
+ u64 mac_rx_512_1023_oct_pkt_num;
+ u64 mac_rx_1024_1518_oct_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_9216_oct_pkt_num;
+ u64 mac_rx_9217_12287_oct_pkt_num;
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_pkt_num;
+ u64 mac_rx_1519_max_good_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+
+ u64 mac_rx_pause_num;
+ u64 mac_rx_pfc_pkt_num;
+ u64 mac_rx_pfc_pri0_pkt_num;
+ u64 mac_rx_pfc_pri1_pkt_num;
+ u64 mac_rx_pfc_pri2_pkt_num;
+ u64 mac_rx_pfc_pri3_pkt_num;
+ u64 mac_rx_pfc_pri4_pkt_num;
+ u64 mac_rx_pfc_pri5_pkt_num;
+ u64 mac_rx_pfc_pri6_pkt_num;
+ u64 mac_rx_pfc_pri7_pkt_num;
+ u64 mac_rx_control_pkt_num;
+ u64 mac_rx_y1731_pkt_num;
+ u64 mac_rx_sym_err_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undersize_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_64_oct_pkt_num;
+ u64 mac_tx_65_127_oct_pkt_num;
+ u64 mac_tx_128_255_oct_pkt_num;
+ u64 mac_tx_256_511_oct_pkt_num;
+ u64 mac_tx_512_1023_oct_pkt_num;
+ u64 mac_tx_1024_1518_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_9216_oct_pkt_num;
+ u64 mac_tx_9217_12287_oct_pkt_num;
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_pkt_num;
+ u64 mac_tx_1519_max_good_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+
+ u64 mac_tx_pause_num;
+ u64 mac_tx_pfc_pkt_num;
+ u64 mac_tx_pfc_pri0_pkt_num;
+ u64 mac_tx_pfc_pri1_pkt_num;
+ u64 mac_tx_pfc_pri2_pkt_num;
+ u64 mac_tx_pfc_pri3_pkt_num;
+ u64 mac_tx_pfc_pri4_pkt_num;
+ u64 mac_tx_pfc_pri5_pkt_num;
+ u64 mac_tx_pfc_pri6_pkt_num;
+ u64 mac_tx_pfc_pri7_pkt_num;
+ u64 mac_tx_control_pkt_num;
+ u64 mac_tx_y1731_pkt_num;
+ u64 mac_tx_1588_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+
+ u64 mac_rx_higig2_ext_pkt_num;
+ u64 mac_rx_higig2_message_pkt_num;
+ u64 mac_rx_higig2_error_pkt_num;
+ u64 mac_rx_higig2_cpu_ctrl_pkt_num;
+ u64 mac_rx_higig2_unicast_pkt_num;
+ u64 mac_rx_higig2_broadcast_pkt_num;
+ u64 mac_rx_higig2_l2_multicast_pkt_num;
+ u64 mac_rx_higig2_l3_multicast_pkt_num;
+
+ u64 mac_tx_higig2_message_pkt_num;
+ u64 mac_tx_higig2_ext_pkt_num;
+ u64 mac_tx_higig2_cpu_ctrl_pkt_num;
+ u64 mac_tx_higig2_unicast_pkt_num;
+ u64 mac_tx_higig2_broadcast_pkt_num;
+ u64 mac_tx_higig2_l2_multicast_pkt_num;
+ u64 mac_tx_higig2_l3_multicast_pkt_num;
+};
+
+enum hinic_rq_filter_type {
+ HINIC_RQ_FILTER_TYPE_NONE = 0x0,
+ HINIC_RQ_FILTER_TYPE_MAC_ONLY = (1 << 0),
+ HINIC_RQ_FILTER_TYPE_VLAN_ONLY = (1 << 1),
+ HINIC_RQ_FILTER_TYPE_VLANMAC = (1 << 2),
+ HINIC_RQ_FILTER_TYPE_VXLAN = (1 << 3),
+ HINIC_RQ_FILTER_TYPE_GENEVE = (1 << 4),
+};
+
+/* Receive queue filter rule passed to hinic_add/del_hw_rqfilter() */
+struct hinic_rq_filter_info {
+	u16 qid;
+	u8 filter_type; /* enum hinic_rq_filter_type: 1 - mac, 8 - vxlan */
+	u8 qflag; /* queue flag: 0 - stdq, 1 - defq, 2 - netq */
+
+	u8 mac[ETH_ALEN];
+	struct {
+		u8 inner_mac[ETH_ALEN];
+		u32 vni;
+	} vxlan;
+};
+
+#define HINIC_MGMT_VERSION_MAX_LEN 32
+
+#define HINIC_FW_VERSION_NAME 16
+#define HINIC_FW_VERSION_SECTION_CNT 4
+#define HINIC_FW_VERSION_SECTION_BORDER 0xFF
+struct hinic_fw_version {
+ u8 mgmt_ver[HINIC_FW_VERSION_NAME];
+ u8 microcode_ver[HINIC_FW_VERSION_NAME];
+ u8 boot_ver[HINIC_FW_VERSION_NAME];
+};
+
+enum hinic_valid_link_settings {
+ HILINK_LINK_SET_SPEED = 0x1,
+ HILINK_LINK_SET_AUTONEG = 0x2,
+ HILINK_LINK_SET_FEC = 0x4,
+};
+
+struct hinic_link_ksettings {
+ u32 valid_bitmap;
+ u32 speed; /* enum nic_speed_level */
+ u8 autoneg; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+};
+
+enum hinic_link_follow_status {
+ HINIC_LINK_FOLLOW_DEFAULT,
+ HINIC_LINK_FOLLOW_PORT,
+ HINIC_LINK_FOLLOW_SEPARATE,
+ HINIC_LINK_FOLLOW_STATUS_MAX,
+};
+
+enum hinic_lro_en_status {
+ HINIC_LRO_STATUS_DISABLE,
+ HINIC_LRO_STATUS_ENABLE,
+ HINIC_LRO_STATUS_UNSET,
+};
+
+#define HINIC_VLAN_FILTER_EN BIT(0)
+#define HINIC_BROADCAST_FILTER_EX_EN BIT(1)
+
+#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF
+
+/* Set mac_vlan table */
+int hinic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id);
+
+int hinic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id);
+
+int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac,
+ u16 vlan_id, u16 func_id);
+int hinic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id);
+/* Obtaining the permanent mac */
+int hinic_get_default_mac(void *hwdev, struct net_device *netdev);
+/* Check whether the current solution uses this interface;
+ * the current code does not invoke the SDK interface to set the MTU.
+ */
+int hinic_set_port_mtu(void *hwdev, u32 new_mtu);
+/* Set vlan leaf table */
+int hinic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id);
+
+int hinic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl);
+
+int hinic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id);
+
+int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info);
+
+int hinic_set_autoneg(void *hwdev, bool enable);
+
+int hinic_force_port_relink(void *hwdev);
+
+int hinic_get_link_mode(void *hwdev, enum hinic_link_mode *supported,
+ enum hinic_link_mode *advertised);
+
+int hinic_set_port_link_status(void *hwdev, bool enable);
+
+int hinic_set_speed(void *hwdev, enum nic_speed_level speed);
+/* SPEED_UNKNOWN = -1, SPEED_10MB_LINK = 0 */
+int hinic_get_speed(void *hwdev, enum nic_speed_level *speed);
+
+int hinic_get_link_state(void *hwdev, u8 *link_state);
+
+int hinic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause);
+
+int hinic_get_hw_pause_info(void *hwdev, struct nic_pause_config *nic_pause);
+
+int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause);
+
+int hinic_set_rx_mode(void *hwdev, u32 enable);
+
+/* offload feature */
+int hinic_set_rx_vlan_offload(void *hwdev, u8 en);
+
+int hinic_set_rx_csum_offload(void *hwdev, u32 en);
+
+int hinic_set_tx_tso(void *hwdev, u8 tso_en);
+
+/* Linux NIC used */
+int hinic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 wqe_num);
+
+/* Win NIC used */
+int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num);
+
+/* Related command dcbtool */
+int hinic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap);
+
+int hinic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap);
+
+int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid,
+ u8 *up_bw, u8 *prio);
+
+int hinic_dcb_get_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid,
+ u8 *up_bw, u8 *prio);
+
+int hinic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up);
+
+int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map);
+
+/* nictool adaptation interface */
+int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period);
+/* There should be output parameters, add the
+ * output parameter struct nic_up_offload *cfg
+ */
+int hinic_get_rx_lro(void *hwdev, struct nic_lro_info *lro_info);
+
+int hinic_get_jumbo_frame_size(void *hwdev, u32 *jumbo_size);
+
+int hinic_set_jumbo_frame_size(void *hwdev, u32 jumbo_size);
+
+int hinic_set_loopback_mode(void *hwdev, bool enable);
+int hinic_set_loopback_mode_ex(void *hwdev, u32 mode, u32 enable);
+int hinic_get_loopback_mode_ex(void *hwdev, u32 *mode, u32 *enable);
+
+int hinic_get_port_enable_state(void *hwdev, bool *enable);
+
+int hinic_get_vport_enable_state(void *hwdev, bool *enable);
+
+int hinic_set_lli_state(void *hwdev, u8 lli_state);
+
+int hinic_set_vport_enable(void *hwdev, bool enable);
+
+int hinic_set_port_enable(void *hwdev, bool enable);
+
+/* rss */
+int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type);
+
+int hinic_get_rss_type(void *hwdev, u32 tmpl_idx,
+ struct nic_rss_type *rss_type);
+
+int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, const u8 *temp);
+
+int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp);
+
+int hinic_rss_get_hash_engine(void *hwdev, u8 tmpl_idx, u8 *type);
+
+int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type);
+
+int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table);
+
+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, const u32 *indir_table);
+
+int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc);
+
+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx);
+
+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx);
+
+/* disable or enable traffic of all functions in the same port */
+int hinic_set_port_funcs_state(void *hwdev, bool enable);
+
+int hinic_reset_port_link_cfg(void *hwdev);
+
+int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats);
+
+int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats);
+
+int hinic_get_mgmt_version(void *hwdev, u8 *mgmt_ver);
+
+int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver);
+
+int hinic_save_vf_mac(void *hwdev, u16 vf_id, u8 *mac);
+
+int hinic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos);
+
+int hinic_kill_vf_vlan(void *hwdev, int vf_id);
+
+int hinic_set_vf_mac(void *hwdev, int vf_id, unsigned char *mac_addr);
+
+u16 hinic_vf_info_vlanprio(void *hwdev, int vf_id);
+
+bool hinic_vf_is_registered(void *hwdev, u16 vf_id);
+
+void hinic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi);
+
+void hinic_notify_all_vfs_link_changed(void *hwdev, u8 link);
+
+void hinic_save_pf_link_status(void *hwdev, u8 link);
+
+int hinic_set_vf_link_state(void *hwdev, u16 vf_id, int link);
+
+int hinic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk);
+
+bool hinic_vf_info_spoofchk(void *hwdev, int vf_id);
+
+int hinic_set_vf_trust(void *hwdev, u16 vf_id, bool trust);
+bool hinic_vf_info_trust(void *hwdev, int vf_id);
+
+int hinic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate);
+
+int hinic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id);
+
+int hinic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id);
+
+int hinic_set_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state);
+
+int hinic_get_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state);
+
+int hinic_get_pf_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state);
+
+int hinic_set_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic_get_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 *vlan_id,
+ u16 *func_id);
+int hinic_set_anti_attack(void *hwdev, bool enable);
+
+int hinic_flush_sq_res(void *hwdev);
+
+int hinic_set_super_cqe_state(void *hwdev, bool enable);
+
+int hinic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en);
+
+int hinic_force_drop_tx_pkt(void *hwdev);
+
+int hinic_update_pf_bw(void *hwdev);
+
+int hinic_set_pf_bw_limit(void *hwdev, u32 bw_limit);
+
+int hinic_set_link_status_follow(void *hwdev,
+ enum hinic_link_follow_status status);
+int hinic_disable_tx_promisc(void *hwdev);
+
+/* HILINK module */
+int hinic_set_link_settings(void *hwdev, struct hinic_link_ksettings *settings);
+
+int hinic_enable_netq(void *hwdev, u8 en);
+int hinic_add_hw_rqfilter(void *hwdev,
+ struct hinic_rq_filter_info *filter_info);
+int hinic_del_hw_rqfilter(void *hwdev,
+ struct hinic_rq_filter_info *filter_info);
+int hinic_get_sfp_eeprom(void *hwdev, u8 *data, u16 *len);
+int hinic_get_sfp_type(void *hwdev, u8 *data0, u8 *data1);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c
new file mode 100644
index 000000000000..7407241216f6
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_hwif.h"
+#include "hinic_wq.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic.h"
+#include "hinic_dbg.h"
+
+#define INVALID_PI 0xFFFF
+
+/* Number of allocated queue pairs; 0 when the device or its NIC I/O
+ * context is not initialized.
+ */
+u16 hinic_dbg_get_qp_num(void *hwdev)
+{
+	struct hinic_nic_io *nic_io;
+
+	if (!hwdev)
+		return 0;
+
+	nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	if (!nic_io)
+		return 0;
+
+	return nic_io->num_qps;
+}
+
+/* Resolve @q_id to its struct hinic_qp; NULL when the device/NIC I/O
+ * context is missing or @q_id is out of range.
+ */
+void *hinic_dbg_get_qp_handle(void *hwdev, u16 q_id)
+{
+	struct hinic_nic_io *nic_io;
+
+	if (!hwdev)
+		return NULL;
+
+	nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	if (!nic_io)
+		return NULL;
+
+	if (q_id >= nic_io->num_qps)
+		return NULL;
+
+	return &nic_io->qps[q_id];
+}
+
+/* Work queue backing the send queue of @q_id, or NULL if unresolved */
+void *hinic_dbg_get_sq_wq_handle(void *hwdev, u16 q_id)
+{
+	struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+	if (!qp)
+		return NULL;
+
+	return qp->sq.wq;
+}
+
+/* Work queue backing the receive queue of @q_id, or NULL if unresolved */
+void *hinic_dbg_get_rq_wq_handle(void *hwdev, u16 q_id)
+{
+	struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+	if (!qp)
+		return NULL;
+
+	return qp->rq.wq;
+}
+
+/* Current SQ software producer index, masked to the ring size; 0 when
+ * the queue does not exist (indistinguishable from a real index of 0).
+ */
+u16 hinic_dbg_get_sq_pi(void *hwdev, u16 q_id)
+{
+	struct hinic_wq *wq = hinic_dbg_get_sq_wq_handle(hwdev, q_id);
+
+	if (!wq)
+		return 0;
+
+	return ((u16)wq->prod_idx) & wq->mask;
+}
+
+/* RQ hardware producer index, read from the PI area the hardware keeps
+ * in big-endian; INVALID_PI when the queue cannot be resolved.
+ */
+u16 hinic_dbg_get_rq_hw_pi(void *hwdev, u16 q_id)
+{
+	struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+	if (qp)
+		return be16_to_cpu(*qp->rq.pi_virt_addr);
+
+	/* only log with a valid hwdev: the old code dereferenced a NULL
+	 * hwdev here when the caller passed one in
+	 */
+	if (hwdev)
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Get rq hw pi failed\n");
+
+	return INVALID_PI;
+}
+
+/* Current RQ software producer index, masked to the ring size; 0 when
+ * the queue does not exist.
+ */
+u16 hinic_dbg_get_rq_sw_pi(void *hwdev, u16 q_id)
+{
+	struct hinic_wq *wq = hinic_dbg_get_rq_wq_handle(hwdev, q_id);
+
+	if (!wq)
+		return 0;
+
+	return ((u16)wq->prod_idx) & wq->mask;
+}
+
+/* Address of the SQ consumer-index area, or NULL if unresolved */
+void *hinic_dbg_get_sq_ci_addr(void *hwdev, u16 q_id)
+{
+	struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+	if (!qp)
+		return NULL;
+
+	return qp->sq.cons_idx_addr;
+}
+
+/* Physical address of the SQ work-queue block, or 0 if unresolved */
+u64 hinic_dbg_get_sq_cla_addr(void *hwdev, u16 q_id)
+{
+	struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+	if (!qp)
+		return 0;
+
+	return qp->sq.wq->block_paddr;
+}
+
+/* Physical address of the RQ work-queue block, or 0 if unresolved */
+u64 hinic_dbg_get_rq_cla_addr(void *hwdev, u16 q_id)
+{
+	struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+
+	if (!qp)
+		return 0;
+
+	return qp->rq.wq->block_paddr;
+}
+
+/* Resolve the SQ doorbell: kernel mapping, physical address and page
+ * index within the doorbell BAR.
+ * Return: 0 on success, -EFAULT if the queue cannot be resolved.
+ */
+int hinic_dbg_get_sq_db_addr(void *hwdev, u16 q_id, u64 **map_addr,
+			     u64 *phy_addr, u32 *pg_idx)
+{
+	struct hinic_qp *qp;
+	struct hinic_hwif *hwif;
+
+	qp = hinic_dbg_get_qp_handle(hwdev, q_id);
+	if (!qp)
+		return -EFAULT;
+
+	/* a non-NULL qp implies hwdev was valid */
+	hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+
+	*map_addr = (u64 *)qp->sq.db_addr;
+	*pg_idx = DB_IDX(qp->sq.db_addr, hwif->db_base);
+	*phy_addr = hwif->db_base_phy + (*pg_idx) * HINIC_DB_PAGE_SIZE;
+
+	return 0;
+}
+
+/* Global queue-pair number of this function; 0 when the device or its
+ * NIC I/O context is not initialized (the old code dereferenced nic_io
+ * unchecked, unlike hinic_dbg_get_qp_num()).
+ */
+u16 hinic_dbg_get_global_qpn(const void *hwdev)
+{
+	const struct hinic_hwdev *dev = hwdev;
+
+	if (!dev || !dev->nic_io)
+		return 0;
+
+	return dev->nic_io->global_qpn;
+}
+
+/* Copy @wqebb_cnt WQE basic blocks starting at @idx out of @wq into
+ * @wqe.  *@wqe_size must equal wqebb_cnt * wqebb_size exactly.
+ * Return: 0 on success, -EFAULT on range overrun or size mismatch.
+ */
+static int get_wqe_info(struct hinic_wq *wq, u16 idx, u16 wqebb_cnt,
+			u8 *wqe, u16 *wqe_size)
+{
+	void *src_wqe;
+	u32 offset;
+	u16 i;
+
+	if (idx + wqebb_cnt > wq->q_depth)
+		return -EFAULT;
+
+	if (*wqe_size != (u16)(wq->wqebb_size * wqebb_cnt)) {
+		pr_err("Unexpect out buf size from user: %d, expect: %d\n",
+		       *wqe_size, (u16)(wq->wqebb_size * wqebb_cnt));
+		return -EFAULT;
+	}
+
+	/* wqebbs may not be virtually contiguous - resolve each address */
+	for (i = 0; i < wqebb_cnt; i++) {
+		src_wqe = (void *)hinic_slq_get_addr(wq, idx + i);
+		offset = i * wq->wqebb_size;
+		memcpy(wqe + offset, src_wqe, wq->wqebb_size);
+	}
+
+	return 0;
+}
+
+/* Dump @wqebb_cnt SQ wqebbs of queue @q_id starting at @idx into @wqe.
+ * Return: 0 on success, -EFAULT if the queue or range is invalid.
+ */
+int hinic_dbg_get_sq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt,
+			      u8 *wqe, u16 *wqe_size)
+{
+	struct hinic_wq *wq;
+	int err;
+
+	wq = hinic_dbg_get_sq_wq_handle(hwdev, q_id);
+	if (!wq)
+		return -EFAULT;
+
+	err = get_wqe_info(wq, idx, wqebb_cnt, wqe, wqe_size);
+
+	return err;
+}
+
+/* Dump @wqebb_cnt RQ wqebbs of queue @q_id starting at @idx into @wqe.
+ * Return: 0 on success, -EFAULT if the queue or range is invalid.
+ */
+int hinic_dbg_get_rq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt,
+			      u8 *wqe, u16 *wqe_size)
+{
+	struct hinic_wq *wq;
+	int err;
+
+	wq = hinic_dbg_get_rq_wq_handle(hwdev, q_id);
+	if (!wq)
+		return -EFAULT;
+
+	err = get_wqe_info(wq, idx, wqebb_cnt, wqe, wqe_size);
+
+	return err;
+}
+
+/* Copy the accumulated software copy of hardware statistics to
+ * @hw_stats.
+ * Return: 0 on success, -EFAULT on a NULL argument or size mismatch.
+ */
+int hinic_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size)
+{
+	/* the old code dereferenced hwdev and out_size unchecked */
+	if (!hwdev || !hw_stats || !out_size)
+		return -EFAULT;
+
+	if (*out_size != sizeof(struct hinic_hw_stats)) {
+		pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
+		       *out_size, sizeof(struct hinic_hw_stats));
+		return -EFAULT;
+	}
+
+	memcpy(hw_stats, &((struct hinic_hwdev *)hwdev)->hw_stats,
+	       sizeof(struct hinic_hw_stats));
+	return 0;
+}
+
+/* Zero the software hardware-statistics copy and the chip fault
+ * buffer.
+ * Return: 0 on success; on error -EFAULT truncated through the u16
+ * return type (non-zero, so "non-zero means failure" still holds).
+ */
+u16 hinic_dbg_clear_hw_stats(void *hwdev, u32 *out_size)
+{
+	/* the old code dereferenced hwdev and out_size unchecked */
+	if (!hwdev || !out_size)
+		return -EFAULT;
+
+	if (*out_size != sizeof(struct hinic_hw_stats)) {
+		pr_err("Unexpect out buf size from user :%d, expect: %lu\n",
+		       *out_size, sizeof(struct hinic_hw_stats));
+		return -EFAULT;
+	}
+
+	memset((void *)&((struct hinic_hwdev *)hwdev)->hw_stats, 0,
+	       sizeof(struct hinic_hw_stats));
+	memset((void *)((struct hinic_hwdev *)hwdev)->chip_fault_stats, 0,
+	       HINIC_CHIP_FAULT_SIZE);
+
+	return 0;
+}
+
+/* Copy up to MAX_DRV_BUF_SIZE bytes of chip fault statistics starting
+ * at @offset into @chip_fault_stats; the tail copy is clamped so it
+ * never reads past HINIC_CHIP_FAULT_SIZE.
+ * NOTE(review): @hwdev and @chip_fault_stats are not NULL-checked here,
+ * unlike the sibling accessors - confirm all callers guarantee them.
+ */
+void hinic_get_chip_fault_stats(const void *hwdev,
+				u8 *chip_fault_stats, int offset)
+{
+	if (offset < 0 || offset > HINIC_CHIP_FAULT_SIZE) {
+		pr_err("Invalid chip offset value: %d\n", offset);
+		return;
+	}
+
+	if (offset + MAX_DRV_BUF_SIZE <= HINIC_CHIP_FAULT_SIZE)
+		memcpy(chip_fault_stats,
+		       ((struct hinic_hwdev *)hwdev)->chip_fault_stats + offset,
+		       MAX_DRV_BUF_SIZE);
+	else
+		memcpy(chip_fault_stats,
+		       ((struct hinic_hwdev *)hwdev)->chip_fault_stats + offset,
+		       HINIC_CHIP_FAULT_SIZE - offset);
+}
+
+/* Report the configured PF bandwidth limit through @pf_bw_limit.
+ * Return: 0 on success, -EINVAL when the device or its NIC I/O context
+ * is missing.
+ * NOTE(review): @pf_bw_limit itself is not NULL-checked - callers must
+ * pass a valid pointer.
+ */
+int hinic_dbg_get_pf_bw_limit(void *hwdev, u32 *pf_bw_limit)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_nic_cfg *nic_cfg;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!dev->nic_io)
+		return -EINVAL;
+
+	nic_cfg = &dev->nic_io->nic_cfg;
+
+	*pf_bw_limit = nic_cfg->pf_bw_limit;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h
new file mode 100644
index 000000000000..1e6479a93ead
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_NIC_DEV_H
+#define HINIC_NIC_DEV_H
+
+#include <linux/netdevice.h>
+#include <linux/semaphore.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "ossl_knl.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_tx.h"
+#include "hinic_rx.h"
+
+#define HINIC_DRV_NAME "hinic"
+#define HINIC_CHIP_NAME "hinic"
+
+#define HINIC_DRV_VERSION "2.3.2.17"
+struct vf_data_storage;
+
+#define HINIC_FUNC_IS_VF(hwdev) (hinic_func_type(hwdev) == TYPE_VF)
+
+enum hinic_flags {
+ HINIC_INTF_UP,
+ HINIC_MAC_FILTER_CHANGED,
+ HINIC_LP_TEST,
+ HINIC_RSS_ENABLE,
+ HINIC_DCB_ENABLE,
+ HINIC_SAME_RXTX,
+ HINIC_INTR_ADAPT,
+ HINIC_UPDATE_MAC_FILTER,
+ HINIC_ETS_ENABLE,
+};
+
+#define RX_BUFF_NUM_PER_PAGE 2
+#define HINIC_MAX_MAC_NUM 3
+#define LP_PKT_CNT 64
+
+struct hinic_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 state;
+};
+
+enum hinic_rx_mode_state {
+ HINIC_HW_PROMISC_ON,
+ HINIC_HW_ALLMULTI_ON,
+ HINIC_PROMISC_FORCE_ON,
+ HINIC_ALLMULTI_FORCE_ON,
+};
+
+enum mac_filter_state {
+ HINIC_MAC_WAIT_HW_SYNC,
+ HINIC_MAC_HW_SYNCED,
+ HINIC_MAC_WAIT_HW_UNSYNC,
+ HINIC_MAC_HW_UNSYNCED,
+};
+
+struct hinic_mac_filter {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ unsigned long state;
+};
+
+/* TC bandwidth allocation per direction */
+struct hinic_tc_attr {
+ u8 pg_id; /* Priority Group(PG) ID */
+ u8 bw_pct; /* % of PG's bandwidth */
+ u8 up_map; /* User Priority to Traffic Class mapping */
+ u8 prio_type;
+};
+
+/* User priority configuration */
+struct hinic_tc_cfg {
+ struct hinic_tc_attr path[2]; /* One each for Tx/Rx */
+
+ bool pfc_en;
+};
+
+struct hinic_dcb_config {
+ u8 pg_tcs;
+ u8 pfc_tcs;
+
+ bool pfc_state;
+
+ struct hinic_tc_cfg tc_cfg[HINIC_DCB_TC_MAX];
+ u8 bw_pct[2][HINIC_DCB_PG_MAX]; /* One each for Tx/Rx */
+};
+
+enum hinic_intr_flags {
+ HINIC_INTR_ON,
+ HINIC_RESEND_ON,
+};
+
+struct hinic_irq {
+ struct net_device *netdev;
+ /* IRQ corresponding index number */
+ u16 msix_entry_idx;
+ u32 irq_id; /* The IRQ number from OS */
+ char irq_name[IFNAMSIZ + 16];
+ struct napi_struct napi;
+ cpumask_t affinity_mask;
+ struct hinic_txq *txq;
+ struct hinic_rxq *rxq;
+ unsigned long intr_flag;
+};
+
+struct hinic_intr_coal_info {
+ u8 pending_limt;
+ u8 coalesce_timer_cfg;
+ u8 resend_timer_cfg;
+
+ u64 pkt_rate_low;
+ u8 rx_usecs_low;
+ u8 rx_pending_limt_low;
+ u64 pkt_rate_high;
+ u8 rx_usecs_high;
+ u8 rx_pending_limt_high;
+
+ u8 user_set_intr_coal_flag;
+};
+
+#define HINIC_NIC_STATS_INC(nic_dev, field) \
+{ \
+ u64_stats_update_begin(&(nic_dev)->stats.syncp); \
+ (nic_dev)->stats.field++; \
+ u64_stats_update_end(&(nic_dev)->stats.syncp); \
+}
+
+struct hinic_nic_stats {
+ u64 netdev_tx_timeout;
+
+ /* Subdivision statistics show in private tool */
+ u64 tx_carrier_off_drop;
+ u64 tx_invalid_qid;
+
+ struct u64_stats_sync syncp;
+};
+
+struct hinic_nic_dev {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ void *hwdev;
+
+ int poll_weight;
+
+ unsigned long *vlan_bitmap;
+
+ u16 num_qps;
+ u16 max_qps;
+
+ u32 msg_enable;
+ unsigned long flags;
+
+ u16 sq_depth;
+ u16 rq_depth;
+
+ /* mapping from priority */
+ u8 sq_cos_mapping[HINIC_DCB_UP_MAX];
+ u8 default_cos_id;
+ struct hinic_txq *txqs;
+ struct hinic_rxq *rxqs;
+
+ struct nic_service_cap nic_cap;
+
+ struct irq_info *qps_irq_info;
+ struct hinic_irq *irq_cfg;
+ struct work_struct rx_mode_work;
+ struct delayed_work moderation_task;
+ struct workqueue_struct *workq;
+
+ struct list_head uc_filter_list;
+ struct list_head mc_filter_list;
+ unsigned long rx_mod_state;
+ int netdev_uc_cnt;
+ int netdev_mc_cnt;
+ int lb_test_rx_idx;
+ int lb_pkt_len;
+ u8 *lb_test_rx_buf;
+
+ u8 rss_tmpl_idx;
+ u16 num_rss;
+ u16 rss_limit;
+ u8 rss_hash_engine;
+ struct nic_rss_type rss_type;
+ u8 *rss_hkey_user;
+ /* hkey in big endian */
+ u32 *rss_hkey_user_be;
+ u32 *rss_indir_user;
+
+ u8 dcbx_cap;
+ u32 dcb_changes;
+ u8 max_cos;
+ u8 up_valid_bitmap;
+ u8 up_cos[HINIC_DCB_UP_MAX];
+ struct ieee_ets hinic_ieee_ets_default;
+ struct ieee_ets hinic_ieee_ets;
+ struct ieee_pfc hinic_ieee_pfc;
+ struct hinic_dcb_config dcb_cfg;
+ struct hinic_dcb_config tmp_dcb_cfg;
+ struct hinic_dcb_config save_dcb_cfg;
+ unsigned long dcb_flags;
+ int disable_port_cnt;
+ /* lock for disable or enable traffic flow */
+ struct semaphore dcb_sem;
+
+ bool heart_status;
+
+ struct hinic_intr_coal_info *intr_coalesce;
+ unsigned long last_moder_jiffies;
+ u32 adaptive_rx_coal;
+ u8 intr_coal_set_flag;
+ u32 his_link_speed;
+ /* interrupt coalesce must be different in virtual machine */
+ bool in_vm;
+ bool is_vm_slave;
+ int is_bm_slave;
+ struct hinic_nic_stats stats;
+ /* lock for nic resource */
+ struct mutex nic_mutex;
+ bool force_port_disable;
+ struct semaphore port_state_sem;
+ u8 link_status;
+
+ struct hinic_environment_info env_info;
+ struct hinic_adaptive_cfg adaptive_cfg;
+
+ /* pangea cpu affinity setting */
+ bool force_affinity;
+ cpumask_t affinity_mask;
+
+ u32 lro_replenish_thld;
+ u16 rx_buff_len;
+ u32 page_order;
+};
+
+extern struct hinic_uld_info nic_uld_info;
+
+int hinic_open(struct net_device *netdev);
+int hinic_close(struct net_device *netdev);
+void hinic_set_ethtool_ops(struct net_device *netdev);
+void hinicvf_set_ethtool_ops(struct net_device *netdev);
+void hinic_update_num_qps(struct net_device *netdev);
+int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size);
+
+int hinic_force_port_disable(struct hinic_nic_dev *nic_dev);
+int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable);
+int hinic_maybe_set_port_state(struct hinic_nic_dev *nic_dev, bool enable);
+void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status);
+
+int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev);
+int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev);
+
+#define hinic_msg(level, nic_dev, msglvl, format, arg...) \
+do { \
+ if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \
+ == NETREG_REGISTERED) \
+ nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \
+ format, ## arg); \
+ else \
+ nic_##level(&(nic_dev)->pdev->dev, \
+ format, ## arg); \
+} while (0)
+
+#define hinic_info(nic_dev, msglvl, format, arg...) \
+ hinic_msg(info, nic_dev, msglvl, format, ## arg)
+
+#define hinic_warn(nic_dev, msglvl, format, arg...) \
+ hinic_msg(warn, nic_dev, msglvl, format, ## arg)
+
+#define hinic_err(nic_dev, msglvl, format, arg...) \
+ hinic_msg(err, nic_dev, msglvl, format, ## arg)
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c
new file mode 100644
index 000000000000..1641b97f5339
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c
@@ -0,0 +1,1047 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_wq.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_mgmt_interface.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic.h"
+#include "hinic_ctx_def.h"
+#include "hinic_wq.h"
+#include "hinic_cmdq.h"
+
+/* Default TX completion-interrupt (CI) coalescing parameters: both zero,
+ * i.e. report every TX completion immediately.
+ * (Renamed from the misspelled HINIC_DEAULT_* — file-local macros only.)
+ */
+#define HINIC_DEFAULT_TX_CI_PENDING_LIMIT 0
+#define HINIC_DEFAULT_TX_CI_COALESCING_TIME 0
+
+/* Load-time tunables for TX CI coalescing, read-only via sysfs (0444) */
+static unsigned char tx_pending_limit = HINIC_DEFAULT_TX_CI_PENDING_LIMIT;
+module_param(tx_pending_limit, byte, 0444);
+MODULE_PARM_DESC(tx_pending_limit, "TX CI coalescing parameter pending_limit (default=0)");
+
+static unsigned char tx_coalescing_time = HINIC_DEFAULT_TX_CI_COALESCING_TIME;
+module_param(tx_coalescing_time, byte, 0444);
+MODULE_PARM_DESC(tx_coalescing_time, "TX CI coalescing parameter coalescing_time (default=0)");
+
+#define WQ_PREFETCH_MAX 4
+#define WQ_PREFETCH_MIN 1
+#define WQ_PREFETCH_THRESHOLD 256
+
+/* Header of every queue-context cmdq message: queue type (SQ/RQ), number
+ * of contexts carried, and the 16-byte-unit offset of the first context
+ * in the function's context table.  Converted to big-endian before send.
+ */
+struct hinic_qp_ctxt_header {
+	u16 num_queues;
+	u16 queue_type;
+	u32 addr_offset;
+};
+
+/* Hardware SQ context image pushed via MODIFY_QUEUE_CONTEXT.  Fields are
+ * packed with the SQ_CTXT_*_SET macros in hinic_sq_prepare_ctxt() and the
+ * whole struct is byte-swapped to big-endian before it goes on the wire.
+ * Layout is hardware ABI — do not reorder.
+ */
+struct hinic_sq_ctxt {
+	u32 ceq_attr;
+
+	u32 ci_owner;
+
+	u32 wq_pfn_hi;
+	u32 wq_pfn_lo;
+
+	u32 pref_cache;
+	u32 pref_owner;
+	u32 pref_wq_pfn_hi_ci;
+	u32 pref_wq_pfn_lo;
+
+	u32 rsvd8;
+	u32 rsvd9;
+
+	u32 wq_block_pfn_hi;
+	u32 wq_block_pfn_lo;
+};
+
+/* Hardware RQ context image pushed via MODIFY_QUEUE_CONTEXT.  Packed by
+ * hinic_rq_prepare_ctxt() with the RQ_CTXT_*_SET macros and byte-swapped
+ * to big-endian before send.  Layout is hardware ABI — do not reorder.
+ */
+struct hinic_rq_ctxt {
+	u32 ceq_attr;
+
+	u32 pi_intr_attr;
+
+	u32 wq_pfn_hi_ci;
+	u32 wq_pfn_lo;
+
+	u32 pref_cache;
+	u32 pref_owner;
+
+	u32 pref_wq_pfn_hi_ci;
+	u32 pref_wq_pfn_lo;
+
+	/* DMA address where hardware reads the producer index write-back */
+	u32 pi_paddr_hi;
+	u32 pi_paddr_lo;
+
+	u32 wq_block_pfn_hi;
+	u32 wq_block_pfn_lo;
+};
+
+/* One cmdq buffer carrying a batch of up to HINIC_Q_CTXT_MAX SQ contexts */
+struct hinic_sq_ctxt_block {
+	struct hinic_qp_ctxt_header cmdq_hdr;
+	struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
+};
+
+/* One cmdq buffer carrying a batch of up to HINIC_Q_CTXT_MAX RQ contexts */
+struct hinic_rq_ctxt_block {
+	struct hinic_qp_ctxt_header cmdq_hdr;
+	struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX];
+};
+
+/* SQ doorbell payload (written big-endian to the doorbell page) */
+struct hinic_sq_db {
+	u32 db_info;
+};
+
+/* 64-bit DMA address split as the hardware expects it */
+struct hinic_addr {
+	u32 addr_hi;
+	u32 addr_lo;
+};
+
+/* Payload of CLEAN_QUEUE_CONTEXT: clears the TSO/LRO offload area and,
+ * for VF RQs, hands the per-queue CQE DMA addresses to hardware.
+ */
+struct hinic_clean_queue_ctxt {
+	struct hinic_qp_ctxt_header cmdq_hdr;
+	u32 ctxt_size;
+	struct hinic_addr cqe_dma_addr[HINIC_RQ_CQ_MAX];
+};
+
+/* Initialize the software state of one SQ.
+ * @cons_idx_addr: this queue's slot in the DMA-coherent CI table, where
+ *		   hardware writes back the consumer index
+ * @db_addr: mapped doorbell address for this SQ
+ * Never fails; returns 0 for symmetry with init_rq().
+ */
+static int init_sq(struct hinic_sq *sq, struct hinic_wq *wq, u16 q_id,
+		   u16 sq_msix_idx, void *cons_idx_addr, void __iomem *db_addr)
+{
+	sq->wq = wq;
+	sq->q_id = q_id;
+	sq->owner = 1;	/* owner bit starts at 1; flipped on each ring wrap */
+	sq->msix_entry_idx = sq_msix_idx;
+
+	sq->cons_idx_addr = cons_idx_addr;
+	sq->db_addr = db_addr;
+
+	return 0;
+}
+
+/* Initialize the software state of one RQ and allocate the coherent page
+ * that hardware reads the producer index from (see hinic_update_rq_hw_pi()).
+ * Returns 0 on success, -ENOMEM if the PI page cannot be allocated.
+ */
+static int init_rq(struct hinic_rq *rq, void *dev_hdl, struct hinic_wq *wq,
+		   u16 q_id, u16 rq_msix_idx)
+{
+	rq->wq = wq;
+	rq->q_id = q_id;
+	rq->cqe_dma_addr = 0;	/* set later via hinic_rq_cqe_addr_set() */
+
+	rq->msix_entry_idx = rq_msix_idx;
+
+	rq->pi_virt_addr = dma_alloc_coherent(dev_hdl, PAGE_SIZE,
+					      &rq->pi_dma_addr, GFP_KERNEL);
+	if (!rq->pi_virt_addr)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* Record the CQE buffer DMA address of one RQ; it is later reported to
+ * hardware by clean_queue_offload_ctxt() (VF only).
+ */
+void hinic_rq_cqe_addr_set(void *hwdev, u16 qid, dma_addr_t cqe_dma_ddr)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+	nic_io->qps[qid].rq.cqe_dma_addr = cqe_dma_ddr;
+}
+
+/* Free the PI write-back page allocated by init_rq() */
+static void clean_rq(struct hinic_rq *rq, void *dev_hdl)
+{
+	dma_free_coherent(dev_hdl, PAGE_SIZE, rq->pi_virt_addr,
+			  rq->pi_dma_addr);
+}
+
+/* Allocate the SQ/RQ work queues and the SQ doorbell for one qp, then
+ * initialize both queues.  On failure, everything acquired so far is
+ * released in reverse order through the goto chain below.
+ */
+static int create_qp(struct hinic_nic_io *nic_io, struct hinic_qp *qp,
+		     u16 q_id, u16 qp_msix_idx, int max_sq_sge)
+{
+	struct hinic_sq *sq = &qp->sq;
+	struct hinic_rq *rq = &qp->rq;
+	void __iomem *db_addr;
+	int err;
+
+	err = hinic_wq_allocate(&nic_io->wqs, &nic_io->sq_wq[q_id],
+				HINIC_SQ_WQEBB_SIZE,
+				nic_io->hwdev->wq_page_size, nic_io->sq_depth,
+				MAX_WQE_SIZE(max_sq_sge, HINIC_SQ_WQEBB_SIZE));
+	if (err) {
+		nic_err(nic_io->hwdev->dev_hdl, "Failed to allocate WQ for SQ\n");
+		return err;
+	}
+
+	err = hinic_wq_allocate(&nic_io->wqs, &nic_io->rq_wq[q_id],
+				HINIC_RQ_WQE_SIZE, nic_io->hwdev->wq_page_size,
+				nic_io->rq_depth, HINIC_RQ_WQE_SIZE);
+	if (err) {
+		nic_err(nic_io->hwdev->dev_hdl, "Failed to allocate WQ for RQ\n");
+		goto rq_alloc_err;
+	}
+
+	/* we don't use direct wqe for sq */
+	err = hinic_alloc_db_addr(nic_io->hwdev, &db_addr, NULL);
+	if (err) {
+		nic_err(nic_io->hwdev->dev_hdl, "Failed to alloc sq doorbell addr\n");
+		goto alloc_db_err;
+	}
+
+	/* the SQ polls its consumer index from its slot in the CI table */
+	err = init_sq(sq, &nic_io->sq_wq[q_id], q_id, qp_msix_idx,
+		      HINIC_CI_VADDR(nic_io->ci_vaddr_base, q_id), db_addr);
+	if (err != 0) {
+		nic_err(nic_io->hwdev->dev_hdl, "Failed to init sq\n");
+		goto sq_init_err;
+	}
+
+	err = init_rq(rq, nic_io->hwdev->dev_hdl, &nic_io->rq_wq[q_id],
+		      q_id, qp_msix_idx);
+	if (err) {
+		nic_err(nic_io->hwdev->dev_hdl, "Failed to init rq\n");
+		goto rq_init_err;
+	}
+
+	return 0;
+
+rq_init_err:
+sq_init_err:
+	hinic_free_db_addr(nic_io->hwdev, db_addr, NULL);
+
+alloc_db_err:
+	hinic_wq_free(&nic_io->wqs, &nic_io->rq_wq[q_id]);
+
+rq_alloc_err:
+	hinic_wq_free(&nic_io->wqs, &nic_io->sq_wq[q_id]);
+
+	return err;
+}
+
+/* Release everything create_qp() acquired for one qp: the RQ PI page,
+ * the SQ doorbell mapping and both work queues.
+ */
+static void destroy_qp(struct hinic_nic_io *nic_io, struct hinic_qp *qp)
+{
+	clean_rq(&qp->rq, nic_io->hwdev->dev_hdl);
+
+	hinic_free_db_addr(nic_io->hwdev, qp->sq.db_addr, NULL);
+
+	hinic_wq_free(&nic_io->wqs, qp->sq.wq);
+	hinic_wq_free(&nic_io->wqs, qp->rq.wq);
+}
+
+/* hinic_create_qps - allocate all queue-pair resources (software only).
+ * @dev: hwdev handle
+ * @num_qp: number of qps to create; must not exceed the function's maximum
+ * @sq_depth/@rq_depth: ring depths in wqebbs
+ * @qps_msix_arry: per-qp MSI-X vector assignments
+ * @max_sq_sge: max scatter-gather entries per SQ wqe (sizes the SQ wqe)
+ *
+ * Allocates the WQ set, the qp array, the DMA-coherent CI table and the
+ * per-queue WQ descriptors, then creates every qp.  Hardware contexts are
+ * not pushed here — see hinic_init_qp_ctxts().  On failure, all resources
+ * are unwound in reverse order.
+ */
+int hinic_create_qps(void *dev, u16 num_qp, u16 sq_depth, u16 rq_depth,
+		     struct irq_info *qps_msix_arry, int max_sq_sge)
+{
+	struct hinic_hwdev *hwdev = dev;
+	struct hinic_nic_io *nic_io;
+	u16 q_id, i, max_qps;
+	int err;
+
+	if (!hwdev || !qps_msix_arry)
+		return -EFAULT;
+
+	max_qps = hinic_func_max_qnum(hwdev);
+	if (num_qp > max_qps) {
+		nic_err(hwdev->dev_hdl, "Create number of qps: %d > max number of qps: %d\n",
+			num_qp, max_qps);
+		return -EINVAL;
+	}
+
+	nic_io = hwdev->nic_io;
+
+	nic_io->max_qps = max_qps;
+	nic_io->num_qps = num_qp;
+	nic_io->sq_depth = sq_depth;
+	nic_io->rq_depth = rq_depth;
+
+	/* one SQ WQ and one RQ WQ per qp */
+	err = hinic_wqs_alloc(&nic_io->wqs, 2 * num_qp, hwdev->dev_hdl);
+	if (err) {
+		nic_err(hwdev->dev_hdl, "Failed to allocate WQS for IO\n");
+		return err;
+	}
+
+	nic_io->qps = kcalloc(num_qp, sizeof(*nic_io->qps), GFP_KERNEL);
+	if (!nic_io->qps) {
+		err = -ENOMEM;
+		goto alloc_qps_err;
+	}
+
+	/* coherent table where hardware writes back per-SQ consumer indexes */
+	nic_io->ci_vaddr_base =
+		dma_alloc_coherent(hwdev->dev_hdl,
+				   CI_TABLE_SIZE(num_qp, PAGE_SIZE),
+				   &nic_io->ci_dma_base, GFP_KERNEL);
+	if (!nic_io->ci_vaddr_base) {
+		err = -ENOMEM;
+		goto ci_base_err;
+	}
+
+	nic_io->sq_wq = kcalloc(num_qp, sizeof(*nic_io->sq_wq), GFP_KERNEL);
+	if (!nic_io->sq_wq) {
+		err = -ENOMEM;
+		goto sq_wq_err;
+	}
+
+	nic_io->rq_wq = kcalloc(num_qp, sizeof(*nic_io->rq_wq), GFP_KERNEL);
+	if (!nic_io->rq_wq) {
+		err = -ENOMEM;
+		goto rq_wq_err;
+	}
+
+	for (q_id = 0; q_id < num_qp; q_id++) {
+		err = create_qp(nic_io, &nic_io->qps[q_id], q_id,
+				qps_msix_arry[q_id].msix_entry_idx, max_sq_sge);
+		if (err) {
+			nic_err(hwdev->dev_hdl,
+				"Failed to allocate qp %d, err: %d\n",
+				q_id, err);
+			goto create_qp_err;
+		}
+	}
+
+	return 0;
+
+create_qp_err:
+	/* destroy only the qps created so far (0 .. q_id-1) */
+	for (i = 0; i < q_id; i++)
+		destroy_qp(nic_io, &nic_io->qps[i]);
+
+	kfree(nic_io->rq_wq);
+
+rq_wq_err:
+	kfree(nic_io->sq_wq);
+
+sq_wq_err:
+	dma_free_coherent(hwdev->dev_hdl, CI_TABLE_SIZE(num_qp, PAGE_SIZE),
+			  nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+ci_base_err:
+	kfree(nic_io->qps);
+
+alloc_qps_err:
+	hinic_wqs_free(&nic_io->wqs);
+
+	return err;
+}
+EXPORT_SYMBOL(hinic_create_qps);
+
+/* Tear down everything hinic_create_qps() built, in reverse order:
+ * qps, WQ descriptor arrays, the CI table, the qp array, the WQ set.
+ */
+void hinic_free_qps(void *dev)
+{
+	struct hinic_hwdev *hwdev = dev;
+	struct hinic_nic_io *nic_io;
+	u16 i;
+
+	if (!hwdev)
+		return;
+
+	nic_io = hwdev->nic_io;
+
+	for (i = 0; i < nic_io->num_qps; i++)
+		destroy_qp(nic_io, &nic_io->qps[i]);
+
+	kfree(nic_io->rq_wq);
+	kfree(nic_io->sq_wq);
+
+	dma_free_coherent(hwdev->dev_hdl,
+			  CI_TABLE_SIZE(nic_io->num_qps, PAGE_SIZE),
+			  nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+	kfree(nic_io->qps);
+
+	hinic_wqs_free(&nic_io->wqs);
+}
+EXPORT_SYMBOL(hinic_free_qps);
+
+/* Fill a queue-context cmdq header: queue type, count, and the 16-byte
+ * offset of queue q_id's context within the function's context table.
+ * Converted to big-endian in place.
+ * NOTE(review): SQ_CTXT_OFFSET/RQ_CTXT_OFFSET receive max_queues for both
+ * of their first two arguments (max sqs and max rqs) — presumably because
+ * the function has equal SQ/RQ counts; confirm against the macro definition.
+ */
+void hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
+				  enum hinic_qp_ctxt_type ctxt_type,
+				  u16 num_queues, u16 max_queues, u16 q_id)
+{
+	qp_ctxt_hdr->queue_type = ctxt_type;
+	qp_ctxt_hdr->num_queues = num_queues;
+
+	if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
+		qp_ctxt_hdr->addr_offset =
+			SQ_CTXT_OFFSET(max_queues, max_queues, q_id);
+	else
+		qp_ctxt_hdr->addr_offset =
+			RQ_CTXT_OFFSET(max_queues, max_queues, q_id);
+
+	qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);
+
+	hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
+}
+
+/* Build the hardware context image for one SQ from its current WQ state
+ * (CI/PI, page PFNs, prefetch settings) and byte-swap it to big-endian.
+ * CEQ generation is disabled (EN = 0): TX completions are signalled via
+ * the CI write-back table instead.
+ */
+void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn,
+			   struct hinic_sq_ctxt *sq_ctxt)
+{
+	struct hinic_wq *wq = sq->wq;
+	u64 wq_page_addr;
+	u64 wq_page_pfn, wq_block_pfn;
+	u32 wq_page_pfn_hi, wq_page_pfn_lo;
+	u32 wq_block_pfn_hi, wq_block_pfn_lo;
+	u16 pi_start, ci_start;
+
+	ci_start = (u16)wq->cons_idx;
+	pi_start = (u16)wq->prod_idx;
+
+	/* read the first page from the HW table */
+	wq_page_addr = be64_to_cpu(*wq->block_vaddr);
+
+	wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+	wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+	wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+	/* If only one page, use 0-level CLA */
+	if (wq->num_q_pages == 1)
+		wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
+	else
+		wq_block_pfn = WQ_BLOCK_PFN(wq->block_paddr);
+
+	wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+	wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+	sq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) |
+			    SQ_CTXT_CEQ_ATTR_SET(0, EN);
+
+	/* owner bit starts at 1, matching init_sq() */
+	sq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) |
+			    SQ_CTXT_CI_SET(1, OWNER);
+
+	sq_ctxt->wq_pfn_hi =
+		SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+		SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
+
+	sq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
+
+	sq_ctxt->pref_cache =
+		SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+		SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+		SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+	sq_ctxt->pref_owner = 1;
+
+	sq_ctxt->pref_wq_pfn_hi_ci =
+		SQ_CTXT_PREF_SET(ci_start, CI) |
+		SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI);
+
+	sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
+
+	sq_ctxt->wq_block_pfn_hi =
+		SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
+
+	sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
+
+	hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
+}
+
+/* Build the hardware context image for one RQ from its current WQ state
+ * and byte-swap it to big-endian.  Unlike the SQ, CEQ generation is
+ * enabled here (EN bit set below) and the PI write-back DMA address from
+ * init_rq() is handed to hardware.
+ */
+void hinic_rq_prepare_ctxt(struct hinic_rq *rq, struct hinic_rq_ctxt *rq_ctxt)
+{
+	struct hinic_wq *wq = rq->wq;
+	u64 wq_page_addr;
+	u64 wq_page_pfn, wq_block_pfn;
+	u32 wq_page_pfn_hi, wq_page_pfn_lo;
+	u32 wq_block_pfn_hi, wq_block_pfn_lo;
+	u16 pi_start, ci_start;
+
+	ci_start = (u16)wq->cons_idx;
+	pi_start = (u16)wq->prod_idx;
+	pi_start = pi_start & wq->mask;	/* wrap PI into the ring */
+
+	/* read the first page from the HW table */
+	wq_page_addr = be64_to_cpu(*wq->block_vaddr);
+
+	wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+	wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+	wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+	/* single-page ring: use 0-level CLA */
+	if (wq->num_q_pages == 1)
+		wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
+	else
+		wq_block_pfn = WQ_BLOCK_PFN(wq->block_paddr);
+
+	wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+	wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+	rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) |
+			    RQ_CTXT_CEQ_ATTR_SET(1, OWNER);
+
+	rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) |
+				RQ_CTXT_PI_SET(rq->msix_entry_idx, INTR);
+
+	rq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+				RQ_CTXT_WQ_PAGE_SET(ci_start, CI);
+
+	rq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
+
+	rq_ctxt->pref_cache =
+		RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+		RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+		RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+	rq_ctxt->pref_owner = 1;
+
+	rq_ctxt->pref_wq_pfn_hi_ci =
+		RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+		RQ_CTXT_PREF_SET(ci_start, CI);
+
+	rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
+
+	/* where hardware reads the producer index write-back */
+	rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
+	rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
+
+	rq_ctxt->wq_block_pfn_hi =
+		RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
+
+	rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
+
+	hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
+}
+
+/* Push all SQ contexts to hardware over the cmdq, in batches of at most
+ * HINIC_Q_CTXT_MAX per command buffer.  Returns 0 on success, -EFAULT if
+ * the firmware reports a non-zero out_param or the cmdq call fails.
+ */
+static int init_sq_ctxts(struct hinic_nic_io *nic_io)
+{
+	struct hinic_hwdev *hwdev = nic_io->hwdev;
+	struct hinic_sq_ctxt_block *sq_ctxt_block;
+	struct hinic_sq_ctxt *sq_ctxt;
+	struct hinic_cmd_buf *cmd_buf;
+	struct hinic_qp *qp;
+	u64 out_param = 0;
+	u16 q_id, curr_id, global_qpn, max_ctxts, i;
+	int err = 0;
+
+	cmd_buf = hinic_alloc_cmd_buf(hwdev);
+	if (!cmd_buf) {
+		nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n");
+		return -ENOMEM;
+	}
+
+	q_id = 0;
+	while (q_id < nic_io->num_qps) {
+		sq_ctxt_block = cmd_buf->buf;
+		sq_ctxt = sq_ctxt_block->sq_ctxt;
+
+		/* remaining queues in this batch, capped at HINIC_Q_CTXT_MAX */
+		max_ctxts = (nic_io->num_qps - q_id) > HINIC_Q_CTXT_MAX ?
+			     HINIC_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+		hinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
+					     HINIC_QP_CTXT_TYPE_SQ, max_ctxts,
+					     nic_io->max_qps, q_id);
+
+		for (i = 0; i < max_ctxts; i++) {
+			curr_id = q_id + i;
+			qp = &nic_io->qps[curr_id];
+			global_qpn = nic_io->global_qpn + curr_id;
+
+			hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]);
+		}
+
+		cmd_buf->size = SQ_CTXT_SIZE(max_ctxts);
+
+		err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+					     HINIC_MOD_L2NIC,
+					     HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT,
+					     cmd_buf, &out_param, 0);
+		if (err || out_param != 0) {
+			nic_err(hwdev->dev_hdl, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n",
+				err, out_param);
+			err = -EFAULT;
+			break;
+		}
+
+		q_id += max_ctxts;
+	}
+
+	hinic_free_cmd_buf(hwdev, cmd_buf);
+
+	return err;
+}
+
+/* Push all RQ contexts to hardware over the cmdq, in batches of at most
+ * HINIC_Q_CTXT_MAX per command buffer (mirror of init_sq_ctxts()).
+ */
+static int init_rq_ctxts(struct hinic_nic_io *nic_io)
+{
+	struct hinic_hwdev *hwdev = nic_io->hwdev;
+	struct hinic_rq_ctxt_block *rq_ctxt_block;
+	struct hinic_rq_ctxt *rq_ctxt;
+	struct hinic_cmd_buf *cmd_buf;
+	struct hinic_qp *qp;
+	u64 out_param = 0;
+	u16 q_id, curr_id, max_ctxts, i;
+	int err = 0;
+
+	cmd_buf = hinic_alloc_cmd_buf(hwdev);
+	if (!cmd_buf) {
+		nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n");
+		return -ENOMEM;
+	}
+
+	q_id = 0;
+	while (q_id < nic_io->num_qps) {
+		rq_ctxt_block = cmd_buf->buf;
+		rq_ctxt = rq_ctxt_block->rq_ctxt;
+
+		/* remaining queues in this batch, capped at HINIC_Q_CTXT_MAX */
+		max_ctxts = (nic_io->num_qps - q_id) > HINIC_Q_CTXT_MAX ?
+			     HINIC_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+		hinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
+					     HINIC_QP_CTXT_TYPE_RQ, max_ctxts,
+					     nic_io->max_qps, q_id);
+
+		for (i = 0; i < max_ctxts; i++) {
+			curr_id = q_id + i;
+			qp = &nic_io->qps[curr_id];
+
+			hinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]);
+		}
+
+		cmd_buf->size = RQ_CTXT_SIZE(max_ctxts);
+
+		err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+					     HINIC_MOD_L2NIC,
+					     HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT,
+					     cmd_buf, &out_param, 0);
+
+		if (err || out_param != 0) {
+			nic_err(hwdev->dev_hdl, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n",
+				err, out_param);
+			err = -EFAULT;
+			break;
+		}
+
+		q_id += max_ctxts;
+	}
+
+	hinic_free_cmd_buf(hwdev, cmd_buf);
+
+	return err;
+}
+
+/* Push SQ contexts first, then RQ contexts; stop at the first failure */
+static int init_qp_ctxts(struct hinic_nic_io *nic_io)
+{
+	int err = init_sq_ctxts(nic_io);
+
+	if (!err)
+		err = init_rq_ctxts(nic_io);
+
+	return err;
+}
+
+/* Send CLEAN_QUEUE_CONTEXT for one queue type: clears the TSO/LRO offload
+ * context area for every queue of the function.  For VF RQs, the per-queue
+ * CQE DMA addresses recorded by hinic_rq_cqe_addr_set() are also handed to
+ * hardware in the same command.
+ */
+static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io,
+				    enum hinic_qp_ctxt_type ctxt_type)
+{
+	struct hinic_hwdev *hwdev = nic_io->hwdev;
+	struct hinic_clean_queue_ctxt *ctxt_block;
+	struct hinic_cmd_buf *cmd_buf;
+	dma_addr_t cqe_dma_addr;
+	struct hinic_addr *addr;
+	u64 out_param = 0;
+	int i, err;
+
+	cmd_buf = hinic_alloc_cmd_buf(hwdev);
+	if (!cmd_buf) {
+		nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n");
+		return -ENOMEM;
+	}
+
+	ctxt_block = cmd_buf->buf;
+	ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps;
+	ctxt_block->cmdq_hdr.queue_type = ctxt_type;
+	ctxt_block->cmdq_hdr.addr_offset = 0;
+
+	/* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
+	ctxt_block->ctxt_size = 0x3;
+	if ((hinic_func_type(hwdev) == TYPE_VF) &&
+	    ctxt_type == HINIC_QP_CTXT_TYPE_RQ) {
+		addr = ctxt_block->cqe_dma_addr;
+		for (i = 0; i < nic_io->max_qps; i++) {
+			cqe_dma_addr = nic_io->qps[i].rq.cqe_dma_addr;
+			addr[i].addr_hi = upper_32_bits(cqe_dma_addr);
+			addr[i].addr_lo = lower_32_bits(cqe_dma_addr);
+		}
+	}
+
+	hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
+
+	cmd_buf->size = sizeof(*ctxt_block);
+
+	err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+				     HINIC_MOD_L2NIC,
+				     HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
+				     cmd_buf, &out_param, 0);
+
+	if ((err) || (out_param)) {
+		nic_err(hwdev->dev_hdl, "Failed to clean queue offload ctxts, err: %d, out_param: 0x%llx\n",
+			err, out_param);
+		err = -EFAULT;
+	}
+
+	hinic_free_cmd_buf(hwdev, cmd_buf);
+
+	return err;
+}
+
+/* Clear the TSO/LRO offload context space for both queue types.
+ * Return the real error code from the first failing step instead of the
+ * boolean result of a logical OR (which collapsed any failure to 1), so
+ * callers can propagate a meaningful errno.  Short-circuit order is
+ * unchanged: a failed SQ clean still skips the RQ clean.
+ */
+static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io)
+{
+	int err;
+
+	err = clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ);
+	if (err)
+		return err;
+
+	return clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ);
+}
+
+/* Push all qp contexts to hardware: SQ/RQ contexts, the TSO/LRO context
+ * clean, the function's root context (ring depths, rx buffer length) and
+ * the per-SQ CI write-back attributes.  On CI-table failure the root
+ * context is rolled back.
+ */
+int hinic_init_qp_ctxts(void *dev)
+{
+	struct hinic_hwdev *hwdev = dev;
+	struct hinic_nic_io *nic_io;
+	struct hinic_sq_attr sq_attr;
+	u16 q_id;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	nic_io = hwdev->nic_io;
+
+	err = init_qp_ctxts(nic_io);
+	if (err) {
+		nic_err(hwdev->dev_hdl, "Failed to init QP ctxts\n");
+		return err;
+	}
+
+	/* clean LRO/TSO context space */
+	err = clean_qp_offload_ctxt(nic_io);
+	if (err) {
+		nic_err(hwdev->dev_hdl, "Failed to clean qp offload ctxts\n");
+		return err;
+	}
+
+	err = hinic_set_root_ctxt(hwdev, nic_io->rq_depth,
+				  nic_io->sq_depth, nic_io->rx_buff_len);
+	if (err) {
+		nic_err(hwdev->dev_hdl, "Failed to set root context\n");
+		return err;
+	}
+
+	/* program per-SQ CI write-back: address (16-byte units), coalescing
+	 * parameters from the module params, and the MSI-X vector to raise
+	 */
+	for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
+		sq_attr.ci_dma_base =
+			HINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2;
+		sq_attr.pending_limit = tx_pending_limit;
+		sq_attr.coalescing_time = tx_coalescing_time;
+		sq_attr.intr_en = 1;
+		sq_attr.intr_idx = nic_io->qps[q_id].sq.msix_entry_idx;
+		sq_attr.l2nic_sqn = q_id;
+		sq_attr.dma_attr_off = 0;
+		err = hinic_set_ci_table(hwdev, q_id, &sq_attr);
+		if (err) {
+			nic_err(hwdev->dev_hdl, "Failed to set ci table\n");
+			goto set_cons_idx_table_err;
+		}
+	}
+
+	return 0;
+
+set_cons_idx_table_err:
+	hinic_clean_root_ctxt(hwdev);
+
+	return err;
+}
+EXPORT_SYMBOL(hinic_init_qp_ctxts);
+
+/* Undo hinic_init_qp_ctxts(): zero the advertised qp count and clear the
+ * function's root context.  Failure is only logged — nothing to unwind.
+ */
+void hinic_free_qp_ctxts(void *hwdev)
+{
+	int err;
+
+	if (!hwdev)
+		return;
+
+	hinic_qps_num_set(hwdev, 0);
+
+	err = hinic_clean_root_ctxt(hwdev);
+	if (err)
+		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Failed to clean root ctxt\n");
+}
+EXPORT_SYMBOL(hinic_free_qp_ctxts);
+
+/* One-time NIC-level hardware initialization: base qpn lookup, function
+ * table, fast-recycle disable, default PF bandwidth, multi-host promisc
+ * setting and (PF only) routine-command reporting.
+ * @rx_buff_len: rx buffer size recorded for later root-context setup
+ */
+int hinic_init_nic_hwdev(void *hwdev, u16 rx_buff_len)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_nic_io *nic_io;
+	u16 global_qpn;
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (is_multi_bm_slave(hwdev) && hinic_support_dynamic_q(hwdev)) {
+		err = hinic_reinit_cmdq_ctxts(dev);
+		if (err) {
+			nic_err(dev->dev_hdl, "Failed to reinit cmdq\n");
+			return err;
+		}
+	}
+
+	nic_io = dev->nic_io;
+
+	err = hinic_get_base_qpn(hwdev, &global_qpn);
+	if (err) {
+		nic_err(dev->dev_hdl, "Failed to get base qpn\n");
+		return err;
+	}
+
+	nic_io->global_qpn = global_qpn;
+	nic_io->rx_buff_len = rx_buff_len;
+	err = hinic_init_function_table(hwdev, nic_io->rx_buff_len);
+	if (err) {
+		nic_err(dev->dev_hdl, "Failed to init function table\n");
+		return err;
+	}
+
+	err = hinic_enable_fast_recycle(hwdev, false);
+	if (err) {
+		nic_err(dev->dev_hdl, "Failed to disable fast recycle\n");
+		return err;
+	}
+
+	/* get default pf bandwidth from firmware, which was set by BIOS */
+	err = hinic_get_bios_pf_bw_limit(hwdev, &nic_io->nic_cfg.pf_bw_limit);
+	if (err) {
+		nic_err(dev->dev_hdl, "Failed to get pf bandwidth limit\n");
+		return err;
+	}
+
+	if (dev->func_mode == FUNC_MOD_MULTI_BM_MASTER ||
+	    dev->func_mode == FUNC_MOD_MULTI_VM_MASTER) {
+		if (hinic_func_type(dev) != TYPE_VF) {
+			err = hinic_disable_tx_promisc(dev);
+			if (err) {
+				nic_err(dev->dev_hdl, "Failed to set tx promisc\n");
+				return err;
+			}
+		}
+	}
+
+	/* VFs don't set port routine command report */
+	if (hinic_func_type(dev) != TYPE_VF) {
+		/* Get the fw support mac reuse flag */
+		err = hinic_get_fw_support_func(hwdev);
+		if (err) {
+			nic_err(dev->dev_hdl, "Failed to get function capability\n");
+			return err;
+		}
+
+		/* Inform mgmt to send sfp's information to driver */
+		err = hinic_set_port_routine_cmd_report(hwdev, true);
+	}
+
+	/* err is 0 here when the VF path skipped the block above */
+	return err;
+}
+EXPORT_SYMBOL(hinic_init_nic_hwdev);
+
+/* Counterpart of hinic_init_nic_hwdev(): PFs stop the port routine
+ * command reporting enabled during init; VFs have nothing to undo.
+ */
+void hinic_free_nic_hwdev(void *hwdev)
+{
+	if (hinic_func_type(hwdev) != TYPE_VF)
+		hinic_set_port_routine_cmd_report(hwdev, false);
+}
+EXPORT_SYMBOL(hinic_free_nic_hwdev);
+
+/* Re-arm the given SQ so hardware raises its completion interrupt again */
+int hinic_enable_tx_irq(void *hwdev, u16 q_id)
+{
+	return hinic_set_arm_bit(hwdev, HINIC_SET_ARM_SQ, q_id);
+}
+EXPORT_SYMBOL(hinic_enable_tx_irq);
+
+/* Flush all in-flight rx/tx of this function (thin wrapper for ULDs) */
+int hinic_rx_tx_flush(void *hwdev)
+{
+	return hinic_func_rx_tx_flush(hwdev);
+}
+EXPORT_SYMBOL(hinic_rx_tx_flush);
+
+/* Number of free wqebbs on the given SQ; one wqebb is always reserved */
+int hinic_get_sq_free_wqebbs(void *hwdev, u16 q_id)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_wq *sq_wq = &dev->nic_io->sq_wq[q_id];
+
+	return atomic_read(&sq_wq->delta) - 1;
+}
+EXPORT_SYMBOL(hinic_get_sq_free_wqebbs);
+
+/* Number of free wqebbs on the given RQ; one wqebb is always reserved.
+ * Read delta through atomic_read() instead of peeking at the raw
+ * .counter field, matching hinic_get_sq_free_wqebbs().
+ */
+int hinic_get_rq_free_wqebbs(void *hwdev, u16 q_id)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+	return atomic_read(&nic_io->rq_wq[q_id].delta) - 1;
+}
+EXPORT_SYMBOL(hinic_get_rq_free_wqebbs);
+
+/* Software consumer index of the given SQ, wrapped into the ring */
+u16 hinic_get_sq_local_ci(void *hwdev, u16 q_id)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_wq *wq = &nic_io->sq_wq[q_id];
+
+	return (u16)(wq->cons_idx & wq->mask);
+}
+EXPORT_SYMBOL(hinic_get_sq_local_ci);
+
+/* Hardware-reported consumer index of the given SQ, read from its slot in
+ * the CI write-back table (stored big-endian) and wrapped into the ring.
+ */
+u16 hinic_get_sq_hw_ci(void *hwdev, u16 q_id)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+	return MASKED_SQ_IDX(sq, be16_to_cpu(*(u16 *)(sq->cons_idx_addr)));
+}
+EXPORT_SYMBOL(hinic_get_sq_hw_ci);
+
+/* Reserve wqebb_cnt wqebbs on the given SQ.
+ * @pi: out — producer index of the reserved wqe
+ * @owner: out — owner-bit value the caller must encode in the wqe
+ * Returns the wqe pointer, or NULL if the ring has too few free wqebbs.
+ * The owner bit flips whenever the reservation reaches/crosses the ring
+ * end, so hardware can tell fresh wqes from stale ones after a wrap.
+ */
+void *hinic_get_sq_wqe(void *hwdev, u16 q_id, int wqebb_cnt, u16 *pi, u8 *owner)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+	void *wqe;
+
+	wqe = hinic_get_wqe(sq->wq, wqebb_cnt, pi);
+	if (wqe) {
+		*owner = sq->owner;
+		if ((*pi + wqebb_cnt) >= nic_io->sq_depth)
+			sq->owner = !sq->owner;
+	}
+
+	return wqe;
+}
+EXPORT_SYMBOL(hinic_get_sq_wqe);
+
+/* Roll back a reservation made by hinic_get_sq_wqe(): restore the owner
+ * bit the caller received, return the wqebbs to the free pool and move
+ * the producer index back.
+ */
+void hinic_return_sq_wqe(void *hwdev, u16 q_id, int num_wqebbs, u8 owner)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+	if (owner != sq->owner)
+		sq->owner = owner;
+
+	atomic_add(num_wqebbs, &sq->wq->delta);
+	sq->wq->prod_idx -= num_wqebbs;
+}
+EXPORT_SYMBOL(hinic_return_sq_wqe);
+
+/* Consume num_wqebbs on the given SQ without fetching a wqe pointer
+ * (companion to hinic_get_sq_wqe() for callers that build the wqe in
+ * place).  Outputs the wrapped producer index and the owner bit, flipping
+ * the owner on ring wrap exactly like hinic_get_sq_wqe().
+ */
+void hinic_update_sq_pi(void *hwdev, u16 q_id, int num_wqebbs, u16 *pi,
+			u8 *owner)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+	*pi = MASKED_WQE_IDX(sq->wq, sq->wq->prod_idx);
+
+	atomic_sub(num_wqebbs, &sq->wq->delta);
+	sq->wq->prod_idx += num_wqebbs;
+
+	*owner = sq->owner;
+	if ((*pi + num_wqebbs) >= nic_io->sq_depth)
+		sq->owner = !sq->owner;
+}
+EXPORT_SYMBOL(hinic_update_sq_pi);
+
+/* Encode the SQ doorbell payload: high bits of the wrapped producer
+ * index, doorbell type, data-path flag, traffic class and queue id.
+ */
+static void sq_prepare_db(struct hinic_sq *sq, struct hinic_sq_db *db,
+			  u16 prod_idx, int cos)
+{
+	u32 hi_prod_idx = SQ_DB_PI_HIGH(MASKED_SQ_IDX(sq, prod_idx));
+
+	db->db_info = SQ_DB_INFO_SET(hi_prod_idx, HI_PI) |
+		      SQ_DB_INFO_SET(SQ_DB, TYPE) |
+		      SQ_DB_INFO_SET(CFLAG_DATA_PATH, CFLAG) |
+		      SQ_DB_INFO_SET(cos, COS) |
+		      SQ_DB_INFO_SET(sq->q_id, QID);
+}
+
+/* Ring the SQ doorbell for the given producer index.  The wmb() orders
+ * all wqe stores before the doorbell write so hardware never fetches a
+ * half-written wqe.
+ */
+static void sq_write_db(struct hinic_sq *sq, u16 prod_idx, int cos)
+{
+	struct hinic_sq_db sq_db;
+
+	sq_prepare_db(sq, &sq_db, prod_idx, cos);
+
+	/* Data should be written to HW in Big Endian Format */
+	sq_db.db_info = cpu_to_be32(sq_db.db_info);
+
+	wmb(); /* Write all before the doorbell */
+
+	writel(sq_db.db_info, SQ_DB_ADDR(sq, prod_idx));
+}
+
+/* Submit a prepared wqe to the given SQ and ring the doorbell.  Multi-
+ * wqebb wqes go through hinic_write_wqe(), which handles the copy when
+ * the wqe spans a WQ page boundary; a single-wqebb wqe was already built
+ * in place in the ring.
+ */
+void hinic_send_sq_wqe(void *hwdev, u16 q_id, void *wqe, int wqebb_cnt, int cos)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+	if (wqebb_cnt != 1)
+		hinic_write_wqe(sq->wq, wqe, wqebb_cnt);
+
+	sq_write_db(sq, MASKED_SQ_IDX(sq, sq->wq->prod_idx), cos);
+}
+EXPORT_SYMBOL(hinic_send_sq_wqe);
+
+/* Retire wqebb_cnt completed wqebbs on the given SQ: advance the local
+ * consumer index and put the wqebbs back in the free pool.
+ */
+void hinic_update_sq_local_ci(void *hwdev, u16 q_id, int wqebb_cnt)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_wq *wq = nic_io->qps[q_id].sq.wq;
+
+	wq->cons_idx += wqebb_cnt;
+	atomic_add(wqebb_cnt, &wq->delta);
+}
+EXPORT_SYMBOL(hinic_update_sq_local_ci);
+
+/* Reserve one wqebb on the given RQ (RQ wqes are always one wqebb).
+ * @pi: out — producer index of the reserved wqe.
+ * Returns NULL when the ring is full.
+ */
+void *hinic_get_rq_wqe(void *hwdev, u16 q_id, u16 *pi)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+	return hinic_get_wqe(nic_io->qps[q_id].rq.wq, 1, pi);
+}
+EXPORT_SYMBOL(hinic_get_rq_wqe);
+
+/* Roll back an RQ reservation: return the wqebbs to the free pool and
+ * move the producer index back (mirror of hinic_return_sq_wqe(), minus
+ * the owner bit the RQ does not track here).
+ */
+void hinic_return_rq_wqe(void *hwdev, u16 q_id, int num_wqebbs)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+	atomic_add(num_wqebbs, &rq->wq->delta);
+	rq->wq->prod_idx -= num_wqebbs;
+}
+EXPORT_SYMBOL(hinic_return_rq_wqe);
+
+/* Consume num_wqebbs from the RQ free pool.  Go through atomic_sub()
+ * instead of writing the raw delta.counter field, matching how every
+ * SQ-side helper in this file manipulates wq->delta.
+ */
+void hinic_update_rq_delta(void *hwdev, u16 q_id, int num_wqebbs)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+
+	atomic_sub(num_wqebbs, &nic_io->qps[q_id].rq.wq->delta);
+}
+EXPORT_SYMBOL(hinic_update_rq_delta);
+
+/* Publish the RQ producer index to hardware: write it (wrapped, big-
+ * endian) into the coherent PI page whose DMA address was programmed
+ * into the RQ context by hinic_rq_prepare_ctxt().
+ */
+void hinic_update_rq_hw_pi(void *hwdev, u16 q_id, u16 pi)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+	*rq->pi_virt_addr = cpu_to_be16(pi & rq->wq->mask);
+}
+EXPORT_SYMBOL(hinic_update_rq_hw_pi);
+
+/* Software consumer index of the given RQ, wrapped into the ring */
+u16 hinic_get_rq_local_ci(void *hwdev, u16 q_id)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_wq *rq_wq = &nic_io->rq_wq[q_id];
+
+	return (u16)(rq_wq->cons_idx & rq_wq->mask);
+}
+EXPORT_SYMBOL(hinic_get_rq_local_ci);
+
+/* Retire wqe_cnt consumed RQ wqebbs: advance the local consumer index
+ * and return the wqebbs through atomic_add() instead of bumping the raw
+ * delta.counter field, matching hinic_update_sq_local_ci().
+ */
+void hinic_update_rq_local_ci(void *hwdev, u16 q_id, int wqe_cnt)
+{
+	struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io;
+	struct hinic_wq *wq = nic_io->qps[q_id].rq.wq;
+
+	wq->cons_idx += wqe_cnt;
+	atomic_add(wqe_cnt, &wq->delta);
+}
+EXPORT_SYMBOL(hinic_update_rq_local_ci);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h
new file mode 100644
index 000000000000..c38e356bb7cb
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0*/
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_HW_NIC_IO_H_
+#define HINIC_HW_NIC_IO_H_
+
+#include "hinic_hw_mgmt.h"
+#include "hinic_qe_def.h"
+
+#define HINIC_RX_BUF_SHIFT 11
+#define HINIC_RX_BUF_LEN 2048 /* buffer len must be 2^n */
+
+#define SQ_CTRL_SET(val, member) ((u32)(val) << SQ_CTRL_##member##_SHIFT)
+
+int hinic_init_nic_hwdev(void *hwdev, u16 rx_buff_len);
+void hinic_free_nic_hwdev(void *hwdev);
+
+/* alloc qps resource */
+int hinic_create_qps(void *hwdev, u16 qp_num, u16 sq_depth, u16 rq_depth,
+ struct irq_info *rq_msix_arry, int max_sq_sge);
+void hinic_free_qps(void *hwdev);
+
+/* init qps ctxt and set sq ci attr and arm all sq */
+int hinic_init_qp_ctxts(void *hwdev);
+void hinic_free_qp_ctxts(void *hwdev);
+
+/* function table and root context set */
+int hinic_set_parameters(void *hwdev, u8 *mac, u16 rx_buf_size, u32 mtu);
+void hinic_clear_parameters(void *hwdev);
+
+/* The function is internally invoked. set_arm_bit function */
+int hinic_enable_tx_irq(void *hwdev, u16 q_id);
+
+int hinic_rx_tx_flush(void *hwdev);
+
+/* Obtain sq/rq number of idle wqebb */
+int hinic_get_sq_free_wqebbs(void *hwdev, u16 q_id);
+int hinic_get_rq_free_wqebbs(void *hwdev, u16 q_id);
+
+u16 hinic_get_sq_local_ci(void *hwdev, u16 q_id);
+u16 hinic_get_sq_hw_ci(void *hwdev, u16 q_id);
+
+void *hinic_get_sq_wqe(void *hwdev, u16 q_id,
+ int wqebb_cnt, u16 *pi, u8 *owner);
+
+void hinic_return_sq_wqe(void *hwdev, u16 q_id, int num_wqebbs, u8 owner);
+
+void hinic_update_sq_pi(void *hwdev, u16 q_id, int num_wqebbs,
+ u16 *pi, u8 *owner);
+
+/* including cross-page process and press the doorbell */
+void hinic_send_sq_wqe(void *hwdev, u16 q_id, void *wqe,
+ int wqebb_cnt, int cos);
+
+void hinic_update_sq_local_ci(void *hwdev, u16 q_id, int wqebb_cnt);
+
+/* Refreshes the rq buff */
+void *hinic_get_rq_wqe(void *hwdev, u16 q_id, u16 *pi);
+/* Roll back RQ wqes; the caller supplies the final PI, so no recalculation is needed */
+void hinic_return_rq_wqe(void *hwdev, u16 q_id, int num_wqebbs);
+
+void hinic_update_rq_delta(void *hwdev, u16 q_id, int num_wqebbs);
+
+void hinic_update_rq_hw_pi(void *hwdev, u16 q_id, u16 pi);
+
+u16 hinic_get_rq_local_ci(void *hwdev, u16 q_id);
+
+/* Clear rx done is not performed */
+void hinic_update_rq_local_ci(void *hwdev, u16 q_id, int wqe_cnt);
+
+/* Scatter-gather entry as hardware consumes it: a 64-bit DMA address
+ * split into high/low words plus the segment length in bytes.
+ */
+struct hinic_sge {
+	u32 hi_addr;
+	u32 lo_addr;
+	u32 len;
+};
+
+void hinic_cpu_to_be32(void *data, int len);
+
+void hinic_be32_to_cpu(void *data, int len);
+
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len);
+
+dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge);
+
+void hinic_rq_cqe_addr_set(void *hwdev, u16 qid, dma_addr_t cqe_dma_ddr);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
new file mode 100644
index 000000000000..92a3d2517352
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
@@ -0,0 +1,2503 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <net/sock.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_lld.h"
+#include "hinic_nic_dev.h"
+#include "hinic_dbg.h"
+#include "hinic_nictool.h"
+#include "hinic_qp.h"
+#include "hinic_dcb.h"
+#include "hinic_dbgtool_knl.h"
+
+#define HIADM_DEV_PATH "/dev/nictool_dev"
+#define HIADM_DEV_CLASS "nictool_class"
+#define HIADM_DEV_NAME "nictool_dev"
+
+#define HINIC_CMDQ_BUF_MAX_SIZE 2048U
+#define MSG_MAX_IN_SIZE (2048 * 1024)
+#define MSG_MAX_OUT_SIZE (2048 * 1024)
+
+static dev_t g_dev_id = {0};
+/*lint -save -e104 -e808*/
+static struct class *g_nictool_class;
+/*lint -restore*/
+static struct cdev g_nictool_cdev;
+
+static int g_nictool_init_flag;
+static int g_nictool_ref_cnt;
+
+typedef int (*nic_driv_module)(struct hinic_nic_dev *nic_dev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size);
+struct nic_drv_module_handle {
+ enum driver_cmd_type driv_cmd_name;
+ nic_driv_module driv_func;
+};
+
+typedef int (*hw_driv_module)(void *hwdev, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size);
+struct hw_drv_module_handle {
+ enum driver_cmd_type driv_cmd_name;
+ hw_driv_module driv_func;
+};
+
+/* Release an input buffer obtained from alloc_buff_in(). Buffers destined
+ * for the microcode (SEND_TO_UCODE) are cmdq buffers and must be returned
+ * via hinic_free_cmd_buf(); everything else was kzalloc'ed.
+ */
+static void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in)
+{
+	if (!buf_in)
+		return;
+
+	if (nt_msg->module == SEND_TO_UCODE)
+		hinic_free_cmd_buf(hwdev, buf_in);
+	else
+		kfree(buf_in);
+}
+
+/* Allocate the kernel-side input buffer for a nictool request and copy the
+ * user payload into it. For SEND_TO_UCODE the buffer is a cmdq buffer
+ * (*buf_in then points at the struct hinic_cmd_buf, not the raw data);
+ * otherwise it is a plain kzalloc'ed buffer. Returns 0 on success or a
+ * negative errno. On failure nothing is left allocated.
+ */
+static int alloc_buff_in(void *hwdev, struct msg_module *nt_msg,
+			 u32 in_size, void **buf_in)
+{
+	void *msg_buf;
+
+	if (!in_size)
+		return 0;
+
+	if (nt_msg->module == SEND_TO_UCODE) {
+		struct hinic_cmd_buf *cmd_buf;
+
+		/* NOTE(review): size violations return -ENOMEM rather than
+		 * -EINVAL throughout this helper; presumably part of the
+		 * established tool ABI - confirm before changing.
+		 */
+		if (in_size > HINIC_CMDQ_BUF_MAX_SIZE) {
+			pr_err("Cmdq in size(%u) more than 2KB\n", in_size);
+			return -ENOMEM;
+		}
+
+		cmd_buf = hinic_alloc_cmd_buf(hwdev);
+		if (!cmd_buf) {
+			pr_err("Alloc cmdq cmd buffer failed in %s\n",
+			       __func__);
+			return -ENOMEM;
+		}
+		msg_buf = cmd_buf->buf;
+		*buf_in = (void *)cmd_buf;
+		cmd_buf->size = (u16)in_size;
+	} else {
+		if (in_size > MSG_MAX_IN_SIZE) {
+			pr_err("In size(%u) more than 2M\n", in_size);
+			return -ENOMEM;
+		}
+		msg_buf = kzalloc(in_size, GFP_KERNEL);
+		*buf_in = msg_buf;
+	}
+	if (!(*buf_in)) {
+		pr_err("Alloc buffer in failed\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(msg_buf, nt_msg->in_buff, in_size)) {
+		pr_err("%s:%d: Copy from user failed\n",
+		       __func__, __LINE__);
+		free_buff_in(hwdev, nt_msg, *buf_in);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Release an output buffer obtained from alloc_buff_out(). The cmdq path
+ * applies only when the response is not immediate (ucode_imm clear);
+ * immediate ucode responses and all other modules use kfree().
+ */
+static void free_buff_out(void *hwdev, struct msg_module *nt_msg,
+			  void *buf_out)
+{
+	if (!buf_out)
+		return;
+
+	if (nt_msg->module == SEND_TO_UCODE &&
+	    !nt_msg->ucode_cmd.ucode_db.ucode_imm)
+		hinic_free_cmd_buf(hwdev, buf_out);
+	else
+		kfree(buf_out);
+}
+
+/* Allocate the kernel-side output buffer for a nictool request. Mirrors
+ * alloc_buff_in(): non-immediate ucode responses get a cmdq buffer, all
+ * other cases a zeroed kmalloc buffer. Returns 0 on success or -ENOMEM.
+ */
+static int alloc_buff_out(void *hwdev, struct msg_module *nt_msg,
+			  u32 out_size, void **buf_out)
+{
+	if (!out_size)
+		return 0;
+
+	if (nt_msg->module == SEND_TO_UCODE &&
+	    !nt_msg->ucode_cmd.ucode_db.ucode_imm) {
+		struct hinic_cmd_buf *cmd_buf;
+
+		if (out_size > HINIC_CMDQ_BUF_MAX_SIZE) {
+			pr_err("Cmdq out size(%u) more than 2KB\n", out_size);
+			return -ENOMEM;
+		}
+
+		/* allocation failure is caught by the common !(*buf_out)
+		 * check below
+		 */
+		cmd_buf = hinic_alloc_cmd_buf(hwdev);
+		*buf_out = (void *)cmd_buf;
+	} else {
+		if (out_size > MSG_MAX_OUT_SIZE) {
+			pr_err("out size(%u) more than 2M\n", out_size);
+			return -ENOMEM;
+		}
+		*buf_out = kzalloc(out_size, GFP_KERNEL);
+	}
+	if (!(*buf_out)) {
+		pr_err("Alloc buffer out failed\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Copy the kernel-side response back to the user buffer. For non-immediate
+ * ucode responses the payload lives inside the cmdq buffer structure, so
+ * dereference ->buf first. Returns 0 or -EFAULT.
+ */
+static int copy_buf_out_to_user(struct msg_module *nt_msg,
+				u32 out_size, void *buf_out)
+{
+	int ret = 0;
+	void *msg_out;
+
+	if (nt_msg->module == SEND_TO_UCODE &&
+	    !nt_msg->ucode_cmd.ucode_db.ucode_imm)
+		msg_out = ((struct hinic_cmd_buf *)buf_out)->buf;
+	else
+		msg_out = buf_out;
+
+	if (copy_to_user(nt_msg->out_buf, msg_out, out_size))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+static int hinic_dbg_get_sq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
+ struct hinic_dbg_sq_info *sq_info,
+ u32 *msg_size);
+static int hinic_dbg_get_rq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
+ struct hinic_dbg_rq_info *rq_info,
+ u32 *msg_size);
+
+/* Tool handler: fetch debug info for one send queue. buf_in carries the
+ * queue id, buf_out receives a struct hinic_dbg_sq_info (size checked by
+ * hinic_dbg_get_sq_info()).
+ */
+static int get_tx_info(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size,
+		       void *buf_out, u32 *out_size)
+{
+	u16 q_id;
+	int err;
+
+	if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get tx info\n");
+		return -EFAULT;
+	}
+
+	/* NOTE(review): in_size is validated against sizeof(int) although
+	 * only a u16 is read - presumably the tool always sends an int;
+	 * confirm against the user-space side before tightening.
+	 */
+	if (!buf_in || !buf_out || in_size != sizeof(int))
+		return -EINVAL;
+
+	q_id = *((u16 *)buf_in);
+
+	err = hinic_dbg_get_sq_info(nic_dev, q_id, buf_out, out_size);
+
+	return err;
+}
+
+/* Tool handler: report the current number of queue pairs as a u16. */
+static int get_q_num(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size,
+		     void *buf_out, u32 *out_size)
+{
+	u16 num_qp;
+
+	if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get queue number\n");
+		return -EFAULT;
+	}
+
+	if (!buf_out)
+		return -EFAULT;
+
+	/* zero queue pairs is treated as a failure */
+	num_qp = hinic_dbg_get_qp_num(nic_dev->hwdev);
+	if (!num_qp)
+		return -EFAULT;
+
+	if (*out_size != sizeof(u16)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(u16));
+		return -EFAULT;
+	}
+	*((u16 *)buf_out) = num_qp;
+
+	return 0;
+}
+
+/* Tool handler: dump one SQ WQE identified by (q_id, wqe_id) in buf_in.
+ * out_size is narrowed to u16 * for the debug helper, so only the low
+ * 16 bits of the reported size are used.
+ */
+static int get_tx_wqe_info(struct hinic_nic_dev *nic_dev,
+			   void *buf_in, u32 in_size,
+			   void *buf_out, u32 *out_size)
+{
+	struct hinic_wqe_info *info = buf_in;
+	u16 q_id = 0;
+	u16 idx = 0, wqebb_cnt = 1;
+	int err;
+
+	if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get tx wqe info\n");
+		return -EFAULT;
+	}
+
+	if (!info || !buf_out || in_size != sizeof(*info))
+		return -EFAULT;
+
+	q_id = (u16)info->q_id;
+	idx = (u16)info->wqe_id;
+
+	err = hinic_dbg_get_sq_wqe_info(nic_dev->hwdev, q_id,
+					idx, wqebb_cnt,
+					buf_out, (u16 *)out_size);
+
+	return err;
+}
+
+/* Tool handler: fetch debug info for one receive queue, then log the last
+ * interrupt-coalescing settings of every rxq as a debugging aid.
+ */
+static int get_rx_info(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size,
+		       void *buf_out, u32 *out_size)
+{
+	u16 q_id;
+	int err;
+
+	if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get rx info\n");
+		return -EFAULT;
+	}
+
+	if (!buf_in || !buf_out || in_size != sizeof(int))
+		return -EINVAL;
+
+	q_id = *((u16 *)buf_in);
+
+	err = hinic_dbg_get_rq_info(nic_dev, q_id, buf_out, out_size);
+
+	/* informational dump runs even if the lookup above failed */
+	for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+		nicif_info(nic_dev, drv, nic_dev->netdev,
+			   "qid: %u, coalesc_timer:0x%x, pending_limit: 0x%x\n",
+			   q_id, nic_dev->rxqs[q_id].last_coalesc_timer_cfg,
+			   nic_dev->rxqs[q_id].last_pending_limt);
+	}
+
+	return err;
+}
+
+/* Tool handler: dump one RQ WQE identified by (q_id, wqe_id) in buf_in.
+ * Mirrors get_tx_wqe_info() for the receive side.
+ */
+static int get_rx_wqe_info(struct hinic_nic_dev *nic_dev, void *buf_in,
+			   u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_wqe_info *info = buf_in;
+	u16 q_id = 0;
+	u16 idx = 0, wqebb_cnt = 1;
+	int err;
+
+	if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get rx wqe info\n");
+		return -EFAULT;
+	}
+
+	if (!info || !buf_out || in_size != sizeof(*info))
+		return -EFAULT;
+
+	q_id = (u16)info->q_id;
+	idx = (u16)info->wqe_id;
+
+	err = hinic_dbg_get_rq_wqe_info(nic_dev->hwdev, q_id,
+					idx, wqebb_cnt,
+					buf_out, (u16 *)out_size);
+
+	return err;
+}
+
+/* Tool handler: report the number of interrupt vectors as a u16.
+ *
+ * Fix: every sibling handler validates the user-supplied buffers before
+ * dereferencing them, but this one read *out_size and wrote *buf_out
+ * unconditionally, allowing a NULL dereference from a malformed request.
+ */
+static int get_inter_num(struct hinic_nic_dev *nic_dev, void *buf_in,
+			 u32 in_size, void *buf_out, u32 *out_size)
+{
+	u16 intr_num;
+
+	if (!buf_out || !out_size)
+		return -EINVAL;
+
+	if (*out_size != sizeof(u16)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(u16));
+		return -EFAULT;
+	}
+
+	intr_num = hinic_intr_num(nic_dev->hwdev);
+	*(u16 *)buf_out = intr_num;
+
+	return 0;
+}
+
+/* Zero the netdev-level software counters under the u64_stats sync. */
+static void clean_nicdev_stats(struct hinic_nic_dev *nic_dev)
+{
+	u64_stats_update_begin(&nic_dev->stats.syncp);
+	nic_dev->stats.netdev_tx_timeout = 0;
+	nic_dev->stats.tx_carrier_off_drop = 0;
+	nic_dev->stats.tx_invalid_qid = 0;
+	u64_stats_update_end(&nic_dev->stats.syncp);
+}
+
+/* Tool handler: clear the per-function software statistics (netdev
+ * counters plus every rxq/txq counter). Expects no output payload.
+ */
+static int clear_func_static(struct hinic_nic_dev *nic_dev, void *buf_in,
+			     u32 in_size, void *buf_out, u32 *out_size)
+{
+	int i;
+
+	if (*out_size != 0) {
+		pr_err("Unexpect out buf size from user: %d, expect: 0\n",
+		       *out_size);
+		return -EINVAL;
+	}
+
+	clean_nicdev_stats(nic_dev);
+	for (i = 0; i < nic_dev->max_qps; i++) {
+		hinic_rxq_clean_stats(&nic_dev->rxqs[i].rxq_stats);
+		hinic_txq_clean_stats(&nic_dev->txqs[i].txq_stats);
+	}
+
+	return 0;
+}
+
+/* Tool handler: report the number of classes of service as a u8. */
+static int get_num_cos(struct hinic_nic_dev *nic_dev, void *buf_in,
+		       u32 in_size, void *buf_out, u32 *out_size)
+{
+	u8 *num_cos = buf_out;
+
+	if (!buf_out || !out_size)
+		return -EINVAL;
+
+	if (*out_size != sizeof(*num_cos)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(*num_cos));
+		return -EFAULT;
+	}
+
+	return hinic_get_num_cos(nic_dev, num_cos);
+}
+
+/* Tool handler: read the DCB CoS to user-priority map into buf_out. */
+static int get_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in,
+			      u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_cos_up_map *map = buf_out;
+
+	if (!buf_out || !out_size)
+		return -EINVAL;
+
+	if (*out_size != sizeof(*map)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(*map));
+		return -EFAULT;
+	}
+
+	return hinic_get_cos_up_map(nic_dev, &map->num_cos, map->cos_up);
+}
+
+/* Tool handler: program the DCB CoS to user-priority map from buf_in. */
+static int set_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in,
+			      u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_cos_up_map *map = buf_in;
+
+	if (!buf_in || !out_size || in_size != sizeof(*map))
+		return -EINVAL;
+
+	if (*out_size != sizeof(*map)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(*map));
+		return -EINVAL;
+	}
+
+	return hinic_set_cos_up_map(nic_dev, map->cos_up);
+}
+
+/* Tool handler: copy the CQE of one receive descriptor, identified by
+ * (q_id, wqe_id) in buf_in, into buf_out. Both indices are range-checked
+ * against the live queue configuration before the copy.
+ */
+static int get_rx_cqe_info(struct hinic_nic_dev *nic_dev, void *buf_in,
+			   u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_wqe_info *info = buf_in;
+	u16 q_id = 0;
+	u16 idx = 0;
+
+	if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get rx cqe info\n");
+		return -EFAULT;
+	}
+
+	if (!info || !buf_out || in_size != sizeof(*info))
+		return -EFAULT;
+
+	if (*out_size != sizeof(struct hinic_rq_cqe)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(struct hinic_rq_cqe));
+		return -EFAULT;
+	}
+	q_id = (u16)info->q_id;
+	idx = (u16)info->wqe_id;
+
+	if (q_id >= nic_dev->num_qps || idx >= nic_dev->rxqs[q_id].q_depth)
+		return -EFAULT;
+
+	memcpy(buf_out, nic_dev->rxqs[q_id].rx_info[idx].cqe,
+	       sizeof(struct hinic_rq_cqe));
+
+	return 0;
+}
+
+/* Fill a struct hinic_dbg_sq_info for queue q_id: producer/consumer
+ * indices, depth, WQEBB size, CI/CLA/doorbell addresses and the global
+ * SQ id. msg_size must equal sizeof(*sq_info). Returns 0 or the error
+ * from the doorbell-address lookup.
+ */
+static int hinic_dbg_get_sq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
+				 struct hinic_dbg_sq_info *sq_info,
+				 u32 *msg_size)
+{
+	int err;
+
+	if (!nic_dev)
+		return -EINVAL;
+
+	if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get sq info\n");
+		return -EFAULT;
+	}
+
+	if (q_id >= nic_dev->num_qps) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Input queue id is larger than the actual queue number\n");
+		return -EINVAL;
+	}
+
+	if (*msg_size != sizeof(*sq_info)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *msg_size, sizeof(*sq_info));
+		return -EFAULT;
+	}
+	sq_info->q_id = q_id;
+	sq_info->pi = hinic_dbg_get_sq_pi(nic_dev->hwdev, q_id);
+	sq_info->ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id);
+	sq_info->fi = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id);
+
+	sq_info->q_depth = nic_dev->txqs[q_id].q_depth;
+	/* pi_reverse */
+
+	sq_info->weqbb_size = HINIC_SQ_WQEBB_SIZE;
+	/* priority */
+
+	sq_info->ci_addr = hinic_dbg_get_sq_ci_addr(nic_dev->hwdev, q_id);
+
+	sq_info->cla_addr = hinic_dbg_get_sq_cla_addr(nic_dev->hwdev, q_id);
+	sq_info->slq_handle = hinic_dbg_get_sq_wq_handle(nic_dev->hwdev, q_id);
+
+	/* direct wqe */
+
+	err = hinic_dbg_get_sq_db_addr(nic_dev->hwdev,
+				       q_id, &sq_info->db_addr.map_addr,
+				       &sq_info->db_addr.phy_addr,
+				       &sq_info->pg_idx);
+
+	sq_info->glb_sq_id = hinic_dbg_get_global_qpn(nic_dev->hwdev) + q_id;
+
+	return err;
+}
+
+/* Fill a struct hinic_dbg_rq_info for queue q_id: hardware/software
+ * producer index, consumer index (masked to queue depth), buffer length,
+ * WQ/CLA addresses and the MSI-X mapping. msg_size must equal
+ * sizeof(*rq_info).
+ */
+static int hinic_dbg_get_rq_info(struct hinic_nic_dev *nic_dev, u16 q_id,
+				 struct hinic_dbg_rq_info *rq_info,
+				 u32 *msg_size)
+{
+	if (!nic_dev)
+		return -EINVAL;
+
+	if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't get rq info\n");
+		return -EFAULT;
+	}
+
+	if (q_id >= nic_dev->num_qps) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Input queue id is larger than the actual queue number\n");
+		return -EINVAL;
+	}
+	if (*msg_size != sizeof(*rq_info)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *msg_size, sizeof(*rq_info));
+		return -EFAULT;
+	}
+
+	rq_info->q_id = q_id;
+	rq_info->glb_rq_id = hinic_dbg_get_global_qpn(nic_dev->hwdev) + q_id;
+
+	rq_info->hw_pi = hinic_dbg_get_rq_hw_pi(nic_dev->hwdev, q_id);
+	rq_info->ci = (u16)nic_dev->rxqs[q_id].cons_idx &
+			nic_dev->rxqs[q_id].q_mask;
+
+	rq_info->sw_pi = nic_dev->rxqs[q_id].next_to_update;
+
+	rq_info->wqebb_size = HINIC_RQ_WQE_SIZE;
+	rq_info->q_depth = nic_dev->rxqs[q_id].q_depth;
+
+	rq_info->buf_len = nic_dev->rxqs[q_id].buf_len;
+
+	rq_info->slq_handle = hinic_dbg_get_rq_wq_handle(nic_dev->hwdev, q_id);
+	if (!rq_info->slq_handle) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Get rq slq handle null\n");
+		return -EFAULT;
+	}
+	rq_info->ci_wqe_page_addr =
+		hinic_slq_get_first_pageaddr(rq_info->slq_handle);
+	rq_info->ci_cla_tbl_addr =
+		hinic_dbg_get_rq_cla_addr(nic_dev->hwdev, q_id);
+
+	rq_info->msix_idx = nic_dev->rxqs[q_id].msix_entry_idx;
+	rq_info->msix_vector = nic_dev->rxqs[q_id].irq_id;
+
+	return 0;
+}
+
+/* Tool handler: read the current loopback mode and enable state. */
+static int get_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in,
+			     u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_nic_loop_mode *mode = buf_out;
+	int err;
+
+	if (!out_size || !mode)
+		return -EFAULT;
+
+	if (*out_size != sizeof(*mode)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(*mode));
+		return -EFAULT;
+	}
+	err = hinic_get_loopback_mode_ex(nic_dev->hwdev, &mode->loop_mode,
+					 &mode->loop_ctrl);
+	return err;
+}
+
+/* Tool handler: program the loopback mode; requires the interface up. */
+static int set_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in,
+			     u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_nic_loop_mode *mode = buf_in;
+
+	if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't set loopback mode\n");
+		return -EFAULT;
+	}
+
+	if (!mode || !out_size || in_size != sizeof(*mode))
+		return -EFAULT;
+
+	if (*out_size != sizeof(*mode)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(*mode));
+		return -EINVAL;
+	}
+
+	return hinic_set_loopback_mode_ex(nic_dev->hwdev, mode->loop_mode,
+					  mode->loop_ctrl);
+}
+
+/* Tool handler: force the reported link state. AUTO re-reads the real
+ * link state from firmware (treated as down on query failure), UP/DOWN
+ * override it unconditionally.
+ */
+static int set_link_mode(struct hinic_nic_dev *nic_dev, void *buf_in,
+			 u32 in_size, void *buf_out, u32 *out_size)
+{
+	enum hinic_nic_link_mode *link = buf_in;
+	u8 link_status;
+
+	if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Netdev is down, can't set link mode\n");
+		return -EFAULT;
+	}
+
+	if (!link || !out_size || in_size != sizeof(*link))
+		return -EFAULT;
+
+	if (*out_size != sizeof(*link)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(*link));
+		return -EINVAL;
+	}
+
+	switch (*link) {
+	case HINIC_LINK_MODE_AUTO:
+		if (hinic_get_link_state(nic_dev->hwdev, &link_status))
+			link_status = false;
+		hinic_link_status_change(nic_dev, (bool)link_status);
+		nicif_info(nic_dev, drv, nic_dev->netdev,
+			   "Set link mode: auto succeed, now is link %s\n",
+			   (link_status ? "up" : "down"));
+		break;
+	case HINIC_LINK_MODE_UP:
+		hinic_link_status_change(nic_dev, true);
+		nicif_info(nic_dev, drv, nic_dev->netdev,
+			   "Set link mode: up succeed\n");
+		break;
+	case HINIC_LINK_MODE_DOWN:
+		hinic_link_status_change(nic_dev, false);
+		nicif_info(nic_dev, drv, nic_dev->netdev,
+			   "Set link mode: down succeed\n");
+		break;
+	default:
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Invalid link mode %d to set\n", *link);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Tool handler: read or write the DCB enable state, driven by the
+ * wr_flag bit of the u32 control word in buf_in. The handler always
+ * returns 0; the outcome is reported to user space via the err field
+ * of the control word echoed back in buf_out.
+ */
+static int set_dcb_cfg(struct hinic_nic_dev *nic_dev, void *buf_in,
+		       u32 in_size, void *buf_out, u32 *out_size)
+{
+	union _dcb_ctl dcb_ctl = {.data = 0};
+	int err;
+
+	if (!buf_in || !buf_out || *out_size != sizeof(u32) ||
+	    in_size != sizeof(u32))
+		return -EINVAL;
+
+	dcb_ctl.data = *((u32 *)buf_in);
+
+	err = hinic_setup_dcb_tool(nic_dev->netdev,
+				   &dcb_ctl.dcb_data.dcb_en,
+				   !!dcb_ctl.dcb_data.wr_flag);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Failed to setup dcb state to %d\n",
+			  !!dcb_ctl.dcb_data.dcb_en);
+		/* NOTE(review): positive EINVAL on purpose? the value is
+		 * only stored into the u8 err field for user space, not
+		 * returned as a kernel errno - confirm with the tool ABI.
+		 */
+		err = EINVAL;
+	}
+	dcb_ctl.dcb_data.err = (u8)err;
+	*((u32 *)buf_out) = (u32)dcb_ctl.data;
+
+	return 0;
+}
+
+/* Tool handler: read the PFC enable state, per-priority config and TC
+ * count, packed into the u32 echoed back through buf_out.
+ */
+int get_pfc_info(struct hinic_nic_dev *nic_dev, void *buf_in,
+		 u32 in_size, void *buf_out, u32 *out_size)
+{
+	union _pfc pfc = {.data = 0};
+
+	if (!buf_in || !buf_out || *out_size != sizeof(u32) ||
+	    in_size != sizeof(u32))
+		return -EINVAL;
+
+	pfc.data = *((u32 *)buf_in);
+
+	hinic_dcbnl_set_pfc_en_tool(nic_dev->netdev,
+				    &pfc.pfc_data.pfc_en, false);
+	hinic_dcbnl_get_pfc_cfg_tool(nic_dev->netdev,
+				     &pfc.pfc_data.pfc_priority);
+	hinic_dcbnl_get_tc_num_tool(nic_dev->netdev,
+				    &pfc.pfc_data.num_of_tc);
+	*((u32 *)buf_out) = (u32)pfc.data;
+
+	return 0;
+}
+
+/* Tool handler: enable/disable PFC. Requires DCB already enabled;
+ * failures are reported via the u8 status written to buf_out (0xff for
+ * "DCB not enabled"), not via the return value.
+ */
+int set_pfc_control(struct hinic_nic_dev *nic_dev, void *buf_in,
+		    u32 in_size, void *buf_out, u32 *out_size)
+{
+	u8 pfc_en = 0;
+	u8 err = 0;
+
+	if (!buf_in || !buf_out || *out_size != sizeof(u8) ||
+	    in_size != sizeof(u8))
+		return -EINVAL;
+
+	pfc_en = *((u8 *)buf_in);
+	if (!(test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Need to enable dcb first\n");
+		err = 0xff;
+		goto exit;
+	}
+
+	hinic_dcbnl_set_pfc_en_tool(nic_dev->netdev, &pfc_en, true);
+	err = hinic_dcbnl_set_pfc_tool(nic_dev->netdev);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Failed to set pfc to %s\n",
+			  pfc_en ? "enable" : "disable");
+	}
+
+exit:
+	*((u8 *)buf_out) = (u8)err;
+
+	return 0;
+}
+
+/* Tool handler: apply an ETS configuration. Each piece (enable flag, TC
+ * mapping, bandwidth percentages, strict priority) is applied only when
+ * its corresponding bit in flag_com is set. Status goes to the u8 in
+ * buf_out (0xff for precondition/range failures); the handler itself
+ * returns 0 once the buffers validate.
+ */
+int set_ets(struct hinic_nic_dev *nic_dev, void *buf_in,
+	    u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct _ets ets = {0};
+	u8 err = 0;
+	u8 i;
+	u8 support_tc = nic_dev->max_cos;
+
+	if (!buf_in || !buf_out || *out_size != sizeof(u8) ||
+	    in_size != sizeof(struct _ets))
+		return -EINVAL;
+
+	memcpy(&ets, buf_in, sizeof(struct _ets));
+
+	if (!(test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Need to enable dcb first\n");
+		err = 0xff;
+		goto exit;
+	}
+	if (ets.flag_com.ets_flag.flag_ets_enable) {
+		hinic_dcbnl_set_ets_en_tool(nic_dev->netdev, &ets.ets_en, true);
+
+		/* disabling ETS needs no further configuration */
+		if (!ets.ets_en)
+			goto exit;
+	}
+
+	if (!(test_bit(HINIC_ETS_ENABLE, &nic_dev->flags))) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Need to enable ets first\n");
+		err = 0xff;
+		goto exit;
+	}
+
+	if (ets.flag_com.ets_flag.flag_ets_cos) {
+		for (i = 0; i < HINIC_DCB_COS_MAX; i++) {
+			if (ets.tc[i] >= HINIC_DCB_TC_MAX) {
+				nicif_err(nic_dev, drv, nic_dev->netdev,
+					  "ETS tc id %d out of range\n",
+					  ets.tc[i]);
+				err = 0xFF;
+				goto exit;
+			}
+		}
+		hinic_dcbnl_set_ets_tc_tool(nic_dev->netdev, ets.tc, true);
+	}
+
+	if (ets.flag_com.ets_flag.flag_ets_percent) {
+		/* percentages beyond the supported TCs must be zero; a
+		 * violation is only logged, the config is still applied
+		 */
+		for (i = support_tc; i < HINIC_DCB_TC_MAX; i++) {
+			if (ets.ets_percent[i]) {
+				nicif_err(nic_dev, drv, nic_dev->netdev,
+					  "ETS setting out of range\n");
+				break;
+			}
+		}
+
+		hinic_dcbnl_set_ets_pecent_tool(nic_dev->netdev,
+						ets.ets_percent, true);
+	}
+
+	if (ets.flag_com.ets_flag.flag_ets_strict)
+		hinic_dcbnl_set_ets_strict_tool(nic_dev->netdev,
+						&ets.strict, true);
+
+	err = hinic_dcbnl_set_ets_tool(nic_dev->netdev);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Failed to set ets [%d]\n", err);
+	}
+exit:
+	*((u8 *)buf_out) = err;
+
+	return 0;
+}
+
+/* Tool handler: count the set bits of up_valid_bitmap, i.e. how many
+ * user priorities this function supports, and return it as a u8.
+ */
+int get_support_up(struct hinic_nic_dev *nic_dev, void *buf_in,
+		   u32 in_size, void *buf_out, u32 *out_size)
+{
+	u8 *up_num = buf_out;
+	u8 support_up = 0;
+	u8 i;
+	u8 up_valid_bitmap = nic_dev->up_valid_bitmap;
+
+	if (!buf_in || !buf_out || !out_size)
+		return -EINVAL;
+
+	if (*out_size != sizeof(*up_num)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(*up_num));
+		return -EFAULT;
+	}
+
+	for (i = 0; i < HINIC_DCB_UP_MAX; i++) {
+		if (up_valid_bitmap & BIT(i))
+			support_up++;
+	}
+
+	*up_num = support_up;
+
+	return 0;
+}
+
+/* Tool handler: report the supported traffic-class count as a u8. */
+int get_support_tc(struct hinic_nic_dev *nic_dev, void *buf_in,
+		   u32 in_size, void *buf_out, u32 *out_size)
+{
+	u8 *tc_num = buf_out;
+
+	if (!buf_in || !buf_out || !out_size)
+		return -EINVAL;
+
+	if (*out_size != sizeof(*tc_num)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(*tc_num));
+		return -EFAULT;
+	}
+
+	hinic_dcbnl_get_tc_num_tool(nic_dev->netdev, tc_num);
+
+	return 0;
+}
+
+/* Tool handler: read back the current ETS configuration. The *_tool
+ * setters act as getters when their last argument is false.
+ */
+int get_ets_info(struct hinic_nic_dev *nic_dev, void *buf_in,
+		 u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct _ets *ets = buf_out;
+
+	if (!buf_in || !buf_out || *out_size != sizeof(*ets))
+		return -EINVAL;
+
+	hinic_dcbnl_set_ets_pecent_tool(nic_dev->netdev,
+					ets->ets_percent, false);
+	hinic_dcbnl_set_ets_tc_tool(nic_dev->netdev, ets->tc, false);
+	hinic_dcbnl_set_ets_en_tool(nic_dev->netdev, &ets->ets_en, false);
+	hinic_dcbnl_set_ets_strict_tool(nic_dev->netdev, &ets->strict, false);
+	ets->err = 0;
+
+	return 0;
+}
+
+/* Tool handler: program the PFC per-priority bitmap. Requires DCB and
+ * PFC already enabled; status is reported via the u8 in buf_out (0xff
+ * for unmet preconditions), the handler returns 0 once buffers validate.
+ */
+int set_pfc_priority(struct hinic_nic_dev *nic_dev, void *buf_in,
+		     u32 in_size, void *buf_out, u32 *out_size)
+{
+	u8 pfc_prority = 0;
+	u8 err = 0;
+
+	if (!buf_in || !buf_out || *out_size != sizeof(u8) ||
+	    in_size != sizeof(u8))
+		return -EINVAL;
+
+	pfc_prority = *((u8 *)buf_in);
+	if (!((test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) &&
+	      nic_dev->tmp_dcb_cfg.pfc_state)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Need to enable pfc first\n");
+		err = 0xff;
+		goto exit;
+	}
+
+	hinic_dcbnl_set_pfc_cfg_tool(nic_dev->netdev, pfc_prority);
+
+	err = hinic_dcbnl_set_pfc_tool(nic_dev->netdev);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Failed to set pfc to %x priority\n",
+			  pfc_prority);
+	}
+exit:
+	*((u8 *)buf_out) = (u8)err;
+
+	return 0;
+}
+
+/* Tool handler: set the PF bandwidth limit (percent). VF callers are
+ * rejected (they must use "ip link"). A positive status from the set
+ * helper is echoed to user space through the u8 in buf_out; a negative
+ * errno is returned directly.
+ *
+ * Fix: pf_bw_limit is a u32, so log it with %u, not %d.
+ */
+static int set_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in,
+			   u32 in_size, void *buf_out, u32 *out_size)
+{
+	u32 pf_bw_limit = 0;
+	int err;
+
+	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "To set VF bandwidth rate, please use ip link cmd\n");
+		return -EINVAL;
+	}
+
+	if (!buf_in || !buf_out || in_size != sizeof(u32) ||
+	    *out_size != sizeof(u8))
+		return -EINVAL;
+
+	pf_bw_limit = *((u32 *)buf_in);
+
+	err = hinic_set_pf_bw_limit(nic_dev->hwdev, pf_bw_limit);
+	if (err) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Failed to set pf bandwidth limit to %u%%\n",
+			  pf_bw_limit);
+		if (err < 0)
+			return err;
+	}
+
+	*((u8 *)buf_out) = (u8)err;
+
+	return 0;
+}
+
+/* Tool handler: read the PF bandwidth limit (percent) as a u32. VF
+ * callers are rejected, matching set_pf_bw_limit().
+ */
+static int get_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in,
+			   u32 in_size, void *buf_out, u32 *out_size)
+{
+	u32 pf_bw_limit = 0;
+	int err;
+
+	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "To get VF bandwidth rate, please use ip link cmd\n");
+		return -EINVAL;
+	}
+
+	if (!buf_out || *out_size != sizeof(u32)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user :%d, expect: %lu\n",
+			  *out_size, sizeof(u32));
+		return -EFAULT;
+	}
+	err = hinic_dbg_get_pf_bw_limit(nic_dev->hwdev, &pf_bw_limit);
+	if (err)
+		return err;
+
+	*((u32 *)buf_out) = pf_bw_limit;
+
+	return 0;
+}
+
+/* Tool handler: report the NAPI poll weight. */
+static int get_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in,
+			   u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_nic_poll_weight *weight_info = buf_out;
+
+	if (!buf_out || *out_size != sizeof(*weight_info)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user :%d, expect: %lu\n",
+			  *out_size, sizeof(*weight_info));
+		return -EFAULT;
+	}
+	weight_info->poll_weight = nic_dev->poll_weight;
+	return 0;
+}
+
+/* Tool handler: set the NAPI poll weight from user space. No range
+ * validation is applied to the new value.
+ */
+static int set_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in,
+			   u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_nic_poll_weight *weight_info = buf_in;
+
+	if (!buf_in || in_size != sizeof(*weight_info) ||
+	    *out_size != sizeof(u32)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect in buf size: %u or out buf size: %d from user, expect: %lu\n",
+			  in_size, *out_size, sizeof(*weight_info));
+		return -EFAULT;
+	}
+
+	nic_dev->poll_weight = weight_info->poll_weight;
+
+	return 0;
+}
+
+/* Tool handler: report whether rx and tx share the same queue context
+ * (the HINIC_SAME_RXTX flag).
+ */
+static int get_homologue(struct hinic_nic_dev *nic_dev, void *buf_in,
+			 u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_homologues *homo = buf_out;
+
+	if (!buf_out || *out_size != sizeof(*homo)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, sizeof(*homo));
+		return -EFAULT;
+	}
+
+	if (test_bit(HINIC_SAME_RXTX, &nic_dev->flags))
+		homo->homo_state = HINIC_HOMOLOGUES_ON;
+	else
+		homo->homo_state = HINIC_HOMOLOGUES_OFF;
+
+	return 0;
+}
+
+/* Tool handler: set or clear the HINIC_SAME_RXTX flag; any other state
+ * value is rejected.
+ */
+static int set_homologue(struct hinic_nic_dev *nic_dev, void *buf_in,
+			 u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_homologues *homo = buf_in;
+
+	if (!buf_in || in_size != sizeof(*homo) ||
+	    *out_size != sizeof(*homo)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect in buf size: %d or out buf size: %d from user, expect: %lu\n",
+			  in_size, *out_size, sizeof(*homo));
+		return -EFAULT;
+	}
+
+	if (homo->homo_state == HINIC_HOMOLOGUES_ON) {
+		set_bit(HINIC_SAME_RXTX, &nic_dev->flags);
+	} else if (homo->homo_state == HINIC_HOMOLOGUES_OFF) {
+		clear_bit(HINIC_SAME_RXTX, &nic_dev->flags);
+	} else {
+		pr_err("Invalid parameters\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Tool handler: return the number of stats items for the requested
+ * stats set (currently only the IO-stats set is non-zero). Also used
+ * internally by get_sset_stats() to size-check the user buffer.
+ */
+static int get_sset_count(struct hinic_nic_dev *nic_dev, void *buf_in,
+			  u32 in_size, void *buf_out, u32 *out_size)
+{
+	u32 count;
+
+	if (!buf_in || !buf_out || in_size != sizeof(u32) ||
+	    *out_size != sizeof(u32)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	switch (*((u32 *)buf_in)) {
+	case HINIC_SHOW_SSET_IO_STATS:
+		count = hinic_get_io_stats_size(nic_dev);
+		break;
+
+	default:
+		count = 0;
+		break;
+	}
+
+	*((u32 *)buf_out) = count;
+
+	return 0;
+}
+
+/* Tool handler: fill buf_out with the stats items of the requested set.
+ * The expected out buffer size is count * sizeof(struct hinic_show_item),
+ * where count comes from get_sset_count() for the same set id.
+ */
+static int get_sset_stats(struct hinic_nic_dev *nic_dev, void *buf_in,
+			  u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct hinic_show_item *items = buf_out;
+	u32 sset, count, size;
+	int err;
+
+	if (!buf_in || in_size != sizeof(u32) || !out_size || !buf_out)
+		return -EINVAL;
+
+	size = sizeof(u32);
+	err = get_sset_count(nic_dev, buf_in, in_size, &count, &size);
+	if (err)
+		return -EINVAL;
+
+	if (count * sizeof(*items) != *out_size) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect out buf size from user: %d, expect: %lu\n",
+			  *out_size, count * sizeof(*items));
+		return -EINVAL;
+	}
+
+	sset = *((u32 *)buf_in);
+
+	switch (sset) {
+	case HINIC_SHOW_SSET_IO_STATS:
+		hinic_get_io_stats(nic_dev, items);
+		break;
+
+	default:
+		nicif_err(nic_dev, drv, nic_dev->netdev, "Unknown %d to get stats\n",
+			  sset);
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+/* Hw handler: report the function type (PF/VF/PPF) as a u16. */
+static int get_func_type(void *hwdev, void *buf_in, u32 in_size,
+			 void *buf_out, u32 *out_size)
+{
+	u16 func_typ;
+
+	func_typ = hinic_func_type(hwdev);
+	if (!buf_out || *out_size != sizeof(u16)) {
+		pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
+		       *out_size, sizeof(u16));
+		return -EFAULT;
+	}
+	*(u16 *)buf_out = func_typ;
+	return 0;
+}
+
+/* Hw handler: report the global function id as a u16. */
+static int get_func_id(void *hwdev, void *buf_in, u32 in_size,
+		       void *buf_out, u32 *out_size)
+{
+	u16 func_id;
+
+	if (!buf_out || *out_size != sizeof(u16)) {
+		pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
+		       *out_size, sizeof(u16));
+		return -EFAULT;
+	}
+
+	func_id = hinic_global_func_id_hw(hwdev);
+	*(u16 *)buf_out = func_id;
+
+	return 0;
+}
+
+/* Hw handler: copy chip fault records into buf_out, starting from the
+ * offset requested in buf_in. In and out payloads share the same
+ * struct chip_fault_stats layout.
+ */
+static int get_chip_faults_stats(void *hwdev, void *buf_in, u32 in_size,
+				 void *buf_out, u32 *out_size)
+{
+	int offset = 0;
+	struct chip_fault_stats *fault_info;
+
+	if (!buf_in || !buf_out || *out_size != sizeof(*fault_info) ||
+	    in_size != sizeof(*fault_info)) {
+		pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
+		       *out_size, sizeof(*fault_info));
+		return -EFAULT;
+	}
+	fault_info = (struct chip_fault_stats *)buf_in;
+	offset = fault_info->offset;
+	fault_info = (struct chip_fault_stats *)buf_out;
+	hinic_get_chip_fault_stats(hwdev, fault_info->chip_faults, offset);
+
+	return 0;
+}
+
+/* Hw handler: thin wrapper over the debug hw-stats dump; only the low
+ * 16 bits of *out_size are consumed/updated by the helper.
+ */
+static int get_hw_stats(void *hwdev, void *buf_in, u32 in_size,
+			void *buf_out, u32 *out_size)
+{
+	return hinic_dbg_get_hw_stats(hwdev, buf_out, (u16 *)out_size);
+}
+
+/* Hw handler: thin wrapper clearing the hardware statistics. */
+static int clear_hw_stats(void *hwdev, void *buf_in, u32 in_size,
+			  void *buf_out, u32 *out_size)
+{
+	return hinic_dbg_clear_hw_stats(hwdev, out_size);
+}
+
+/* Hw handler: format the driver version string into buf_out. The
+ * snprintf result is checked for both error and truncation before the
+ * fixed-size copy into the user-visible struct.
+ */
+static int get_drv_version(void *hwdev, void *buf_in, u32 in_size,
+			   void *buf_out, u32 *out_size)
+{
+	struct drv_version_info *ver_info;
+	char ver_str[MAX_VER_INFO_LEN] = {0};
+	int err;
+
+	if (*out_size != sizeof(*ver_info)) {
+		pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
+		       *out_size, sizeof(*ver_info));
+		return -EFAULT;
+	}
+	err = snprintf(ver_str, sizeof(ver_str),
+		       "%s [compiled with the kernel]", HINIC_DRV_VERSION);
+	if (err <= 0 || err >= MAX_VER_INFO_LEN) {
+		pr_err("Failed snprintf driver version, function return(%d) and dest_len(%d)\n",
+		       err, MAX_VER_INFO_LEN);
+		return -EFAULT;
+	}
+	ver_info = (struct drv_version_info *)buf_out;
+	memcpy(ver_info->ver, ver_str, sizeof(ver_str));
+
+	return 0;
+}
+
+/* Hw handler: self test is not implemented; always reports success. */
+static int get_self_test(void *hwdev, void *buf_in, u32 in_size,
+			 void *buf_out, u32 *out_size)
+{
+	return 0;
+}
+
+/* Hw handler: chip-id test is not implemented; always reports success. */
+static int get_chip_id_test(void *hwdev, void *buf_in, u32 in_size,
+			    void *buf_out, u32 *out_size)
+{
+	return 0;
+}
+
+/* Hw handler: copy the card description (struct card_info) to buf_out. */
+static int get_single_card_info(void *hwdev, void *buf_in, u32 in_size,
+				void *buf_out, u32 *out_size)
+{
+	if (!buf_in || !buf_out || in_size != sizeof(struct card_info) ||
+	    *out_size != sizeof(struct card_info)) {
+		pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
+		       *out_size, sizeof(struct card_info));
+		return -EFAULT;
+	}
+
+	hinic_get_card_info(hwdev, buf_out);
+
+	return 0;
+}
+
+/* Hw handler: report the device id to user space as a u16.
+ *
+ * Fix: the out buffer is validated to be exactly sizeof(u16), but the
+ * result was stored through a u32 cast, writing 4 bytes into a 2-byte
+ * user buffer (out-of-bounds write). Store through a u16 pointer to
+ * match the validated size.
+ */
+static int get_device_id(void *hwdev, void *buf_in, u32 in_size,
+			 void *buf_out, u32 *out_size)
+{
+	u16 dev_id;
+	int err;
+
+	if (!buf_out || *out_size != sizeof(u16)) {
+		pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
+		       *out_size, sizeof(u16));
+		return -EFAULT;
+	}
+
+	err = hinic_get_device_id(hwdev, &dev_id);
+	if (err)
+		return err;
+
+	*((u16 *)buf_out) = dev_id;
+
+	return 0;
+}
+
+/* Hw handler: report 0 if the driver runs in the host, 1 in a VM. */
+static int is_driver_in_vm(void *hwdev, void *buf_in, u32 in_size,
+			   void *buf_out, u32 *out_size)
+{
+	bool in_host;
+
+	if (!buf_out || (*out_size != sizeof(u8)))
+		return -EINVAL;
+
+	in_host = hinic_is_in_host();
+	if (in_host)
+		*((u8 *)buf_out) = 0;
+	else
+		*((u8 *)buf_out) = 1;
+
+	return 0;
+}
+
+static int get_pf_id(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ struct hinic_pf_info *pf_info;
+ u32 port_id = 0;
+ int err;
+
+ if (!buf_out || (*out_size != sizeof(*pf_info)) ||
+ !buf_in || in_size != sizeof(u32))
+ return -EINVAL;
+
+ port_id = *((u32 *)buf_in);
+ pf_info = (struct hinic_pf_info *)buf_out;
+ err = hinic_get_pf_id(hwdev, port_id, &pf_info->pf_id,
+ &pf_info->isvalid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Lazily allocate (once per card) the page block that backs the
+ * userspace API chain, record its physical address, and mark each
+ * page reserved so it can be mmap'ed to user mode. Serialized by
+ * g_hinic_addr_lock. Returns 0 on success, -EFAULT on failure.
+ */
+static int __get_card_usr_api_chain_mem(int card_idx)
+{
+ unsigned char *tmp;
+ int i;
+
+ mutex_lock(&g_hinic_addr_lock);
+ g_hinic_card_id = card_idx;
+ if (!g_hinic_card_vir_addr[card_idx]) {
+ g_hinic_card_vir_addr[card_idx] =
+ (void *)__get_free_pages(GFP_KERNEL,
+ DBGTOOL_PAGE_ORDER);
+ if (!g_hinic_card_vir_addr[card_idx]) {
+ pr_err("Alloc api chain memory fail for card %d\n",
+ card_idx);
+ mutex_unlock(&g_hinic_addr_lock);
+ return -EFAULT;
+ }
+
+ memset(g_hinic_card_vir_addr[card_idx], 0,
+ PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER));
+
+ g_hinic_card_phy_addr[card_idx] =
+ virt_to_phys(g_hinic_card_vir_addr[card_idx]);
+ if (!g_hinic_card_phy_addr[card_idx]) {
+ pr_err("phy addr for card %d is 0\n", card_idx);
+ free_pages((unsigned long)g_hinic_card_vir_addr[card_idx],
+ DBGTOOL_PAGE_ORDER);
+ g_hinic_card_vir_addr[card_idx] = NULL;
+ mutex_unlock(&g_hinic_addr_lock);
+ return -EFAULT;
+ }
+
+ /* Reserve every page so remap to userspace is permitted */
+ tmp = g_hinic_card_vir_addr[card_idx];
+ for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) {
+ SetPageReserved(virt_to_page(tmp));
+ tmp += PAGE_SIZE;
+ }
+ }
+ mutex_unlock(&g_hinic_addr_lock);
+
+ return 0;
+}
+
+/* Collect per-PF device info for the card named dev_name, ensure the
+ * userspace API chain memory exists, and copy the info array back to
+ * user mode. Both in and out buffer lengths must match dev_info[].
+ */
+static int get_pf_dev_info(char *dev_name, struct msg_module *nt_msg)
+{
+ struct pf_dev_info dev_info[16] = { {0} };
+ struct card_node *card_info = NULL;
+ int i;
+ int err;
+
+ if (nt_msg->len_info.out_buff_len != sizeof(dev_info) ||
+ nt_msg->len_info.in_buff_len != sizeof(dev_info)) {
+ /* dev_info is already the whole 16-entry array; log its
+ * size directly instead of multiplying by 16 again.
+ */
+ pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n",
+ nt_msg->len_info.out_buff_len,
+ nt_msg->len_info.in_buff_len, sizeof(dev_info));
+ return -EINVAL;
+ }
+
+ /* Find the card whose chip name matches dev_name */
+ for (i = 0; i < MAX_CARD_NUM; i++) {
+ card_info = (struct card_node *)g_hinic_card_node_array[i];
+ if (!card_info)
+ continue;
+ if (!strncmp(dev_name, card_info->chip_name, IFNAMSIZ))
+ break;
+ }
+
+ if (i == MAX_CARD_NUM || !card_info) {
+ pr_err("Can't find this card %s\n", dev_name);
+ return -EFAULT;
+ }
+
+ err = __get_card_usr_api_chain_mem(i);
+ if (err) {
+ pr_err("Faile to get api chain memory for userspace %s\n",
+ dev_name);
+ return -EFAULT;
+ }
+
+ hinic_chipif_get_all_pf_dev_info(dev_info, i,
+ card_info->func_handle_array);
+
+ /* Copy the dev_info to user mode */
+ if (copy_to_user(nt_msg->out_buf, dev_info, sizeof(dev_info))) {
+ pr_err("Copy dev_info to user fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Free the dbgtool kernel memory for the card named dev_name. */
+static int knl_free_mem(char *dev_name, struct msg_module *nt_msg)
+{
+ struct card_node *card_info = NULL;
+ int i;
+
+ for (i = 0; i < MAX_CARD_NUM; i++) {
+ card_info = (struct card_node *)g_hinic_card_node_array[i];
+ if (!card_info)
+ continue;
+ if (!strncmp(dev_name, card_info->chip_name, IFNAMSIZ))
+ break;
+ }
+
+ if (i == MAX_CARD_NUM || !card_info) {
+ pr_err("Can't find this card %s\n", dev_name);
+ return -EFAULT;
+ }
+
+ hinic_dbgtool_knl_free_mem(i);
+
+ return 0;
+}
+
+extern void hinic_get_card_func_info_by_card_name(const char *chip_name,
+ struct hinic_card_func_info
+ *card_func);
+
+/* Return function-level info for a whole card (chip name "hinic<N>"),
+ * including the physical address of the userspace API chain memory.
+ */
+static int get_card_func_info(char *dev_name, struct msg_module *nt_msg)
+{
+ struct hinic_card_func_info card_func_info = {0};
+ int id, err;
+
+ if (nt_msg->len_info.out_buff_len != sizeof(card_func_info) ||
+ nt_msg->len_info.in_buff_len != sizeof(card_func_info)) {
+ pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n",
+ nt_msg->len_info.out_buff_len,
+ nt_msg->len_info.in_buff_len, sizeof(card_func_info));
+ return -EINVAL;
+ }
+
+ /* Name must start with the chip-name prefix, e.g. "hinic" */
+ err = memcmp(dev_name, HINIC_CHIP_NAME, strlen(HINIC_CHIP_NAME));
+ if (err) {
+ pr_err("Invalid chip name %s\n", dev_name);
+ return err;
+ }
+
+ err = sscanf(dev_name, HINIC_CHIP_NAME "%d", &id);
+ if (err <= 0) {
+ pr_err("Failed to get hinic id\n");
+ return err;
+ }
+
+ if (id >= MAX_CARD_NUM) {
+ pr_err("chip id %d exceed limit[0-%d]\n", id, MAX_CARD_NUM - 1);
+ return -EINVAL;
+ }
+
+ hinic_get_card_func_info_by_card_name(dev_name, &card_func_info);
+
+ if (!card_func_info.num_pf) {
+ pr_err("None function found for %s\n", dev_name);
+ return -EFAULT;
+ }
+
+ err = __get_card_usr_api_chain_mem(id);
+ if (err) {
+ pr_err("Faile to get api chain memory for userspace %s\n",
+ dev_name);
+ return -EFAULT;
+ }
+
+ card_func_info.usr_api_phy_addr = g_hinic_card_phy_addr[id];
+
+ /* Copy the dev_info to user mode */
+ if (copy_to_user(nt_msg->out_buf, &card_func_info,
+ sizeof(card_func_info))) {
+ pr_err("Copy dev_info to user fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#define GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT 30
+/* Poll the management channel once per second until it becomes
+ * active, up to GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT seconds.
+ * Returns 0 when active, -ETIMEDOUT otherwise.
+ */
+static int get_firmware_active_status(void *hwdev, void *buf_in, u32 in_size,
+ void *buf_out, u32 *out_size)
+{
+ u32 loop_cnt = 0;
+
+ if (*out_size != 0) {
+ pr_err("Unexpect out buf size from user: %d, expect: 0\n",
+ *out_size);
+ return -EINVAL;
+ }
+
+ while (loop_cnt < GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) {
+ if (!hinic_get_mgmt_channel_status(hwdev))
+ return 0;
+
+ msleep(1000);
+ loop_cnt++;
+ }
+ if (loop_cnt == GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* Dispatch table: NIC-driver ioctl command -> handler */
+struct nic_drv_module_handle nic_driv_module_cmd_handle[] = {
+ {TX_INFO, get_tx_info},
+ {Q_NUM, get_q_num},
+ {TX_WQE_INFO, get_tx_wqe_info},
+ {RX_INFO, get_rx_info},
+ {RX_WQE_INFO, get_rx_wqe_info},
+ {RX_CQE_INFO, get_rx_cqe_info},
+ {GET_INTER_NUM, get_inter_num},
+ {CLEAR_FUNC_STASTIC, clear_func_static},
+ {GET_NUM_COS, get_num_cos},
+ {GET_COS_UP_MAP, get_dcb_cos_up_map},
+ {SET_COS_UP_MAP, set_dcb_cos_up_map},
+ {GET_LOOPBACK_MODE, get_loopback_mode},
+ {SET_LOOPBACK_MODE, set_loopback_mode},
+ {SET_LINK_MODE, set_link_mode},
+ {SET_PF_BW_LIMIT, set_pf_bw_limit},
+ {GET_PF_BW_LIMIT, get_pf_bw_limit},
+ {GET_POLL_WEIGHT, get_poll_weight},
+ {SET_POLL_WEIGHT, set_poll_weight},
+ {GET_HOMOLOGUE, get_homologue},
+ {SET_HOMOLOGUE, set_homologue},
+ {GET_SSET_COUNT, get_sset_count},
+ {GET_SSET_ITEMS, get_sset_stats},
+ {SET_PFC_CONTROL, set_pfc_control},
+ {SET_ETS, set_ets},
+ {GET_ETS_INFO, get_ets_info},
+ {SET_PFC_PRIORITY, set_pfc_priority},
+ {SET_DCB_CFG, set_dcb_cfg},
+ {GET_PFC_INFO, get_pfc_info},
+ {GET_SUPPORT_UP, get_support_up},
+ {GET_SUPPORT_TC, get_support_tc},
+};
+
+/* Dispatch table: HW-driver ioctl command -> handler */
+struct hw_drv_module_handle hw_driv_module_cmd_handle[] = {
+ {FUNC_TYPE, get_func_type},
+ {GET_FUNC_IDX, get_func_id},
+ {GET_DRV_VERSION, get_drv_version},
+ {GET_HW_STATS, get_hw_stats},
+ {CLEAR_HW_STATS, clear_hw_stats},
+ {GET_SELF_TEST_RES, get_self_test},
+ {GET_CHIP_FAULT_STATS, get_chip_faults_stats},
+ {GET_CHIP_ID, get_chip_id_test},
+ {GET_SINGLE_CARD_INFO, get_single_card_info},
+ {GET_FIRMWARE_ACTIVE_STATUS, get_firmware_active_status},
+ {GET_DEVICE_ID, get_device_id},
+ {IS_DRV_IN_VM, is_driver_in_vm},
+ {GET_PF_ID, get_pf_id},
+};
+
+/* Look up cmd in nic_driv_module_cmd_handle[] and invoke the handler
+ * under nic_dev->nic_mutex. Returns -EINVAL for unknown commands.
+ */
+static int send_to_nic_driver(struct hinic_nic_dev *nic_dev,
+ u32 cmd, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ int index, num_cmds = sizeof(nic_driv_module_cmd_handle) /
+ sizeof(nic_driv_module_cmd_handle[0]);
+ enum driver_cmd_type cmd_type = (enum driver_cmd_type)cmd;
+ int err = 0;
+
+ mutex_lock(&nic_dev->nic_mutex);
+ for (index = 0; index < num_cmds; index++) {
+ if (cmd_type ==
+ nic_driv_module_cmd_handle[index].driv_cmd_name) {
+ err = nic_driv_module_cmd_handle[index].driv_func
+ (nic_dev, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ }
+ }
+ mutex_unlock(&nic_dev->nic_mutex);
+
+ if (index == num_cmds)
+ return -EINVAL;
+ return err;
+}
+
+/* Look up the command in hw_driv_module_cmd_handle[] and invoke the
+ * handler. Returns -EINVAL for unknown commands.
+ */
+static int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out,
+ u32 *out_size)
+{
+ int index, num_cmds = sizeof(hw_driv_module_cmd_handle) /
+ sizeof(hw_driv_module_cmd_handle[0]);
+ enum driver_cmd_type cmd_type =
+ (enum driver_cmd_type)(nt_msg->msg_formate);
+ int err = 0;
+
+ for (index = 0; index < num_cmds; index++) {
+ if (cmd_type ==
+ hw_driv_module_cmd_handle[index].driv_cmd_name) {
+ err = hw_driv_module_cmd_handle[index].driv_func
+ (hwdev, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ }
+ }
+ if (index == num_cmds)
+ return -EINVAL;
+ return err;
+}
+
+/* Forward a command to the microcode over the command queue, using
+ * the direct-response path when ucode_imm is set, otherwise the
+ * detail-response path.
+ */
+static int send_to_ucode(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out,
+ u32 *out_size)
+{
+ int ret = 0;
+
+ if (nt_msg->ucode_cmd.ucode_db.ucode_imm) {
+ ret = hinic_cmdq_direct_resp
+ (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type,
+ nt_msg->ucode_cmd.ucode_db.comm_mod_type,
+ nt_msg->ucode_cmd.ucode_db.ucode_cmd_type,
+ buf_in, buf_out, 0);
+ if (ret)
+ pr_err("Send direct cmdq err: %d\n", ret);
+ } else {
+ ret = hinic_cmdq_detail_resp
+ (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type,
+ nt_msg->ucode_cmd.ucode_db.comm_mod_type,
+ nt_msg->ucode_cmd.ucode_db.ucode_cmd_type,
+ buf_in, buf_out, 0);
+ if (ret)
+ pr_err("Send detail cmdq err: %d\n", ret);
+ }
+
+ return ret;
+}
+
+/* CSR access width selector: 4-byte or 8-byte operations */
+enum api_csr_op_width {
+ OP_WIDTH_4B,
+ OP_WIDTH_8B,
+ OP_WIDTH_UNKNOWN,
+};
+
+/* Read rd_len bytes of CSR space starting at the address in buf_in,
+ * in 4- or 8-byte units, into buf_out. A trailing partial unit is
+ * rounded up to one extra full read. Sets *out_size to rd_len.
+ */
+static int api_csr_read(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out, u32 *out_size,
+ enum api_csr_op_width width)
+{
+ struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in;
+ u32 op_bytes = (width == OP_WIDTH_4B ? sizeof(u32) : sizeof(u64));
+ int ret = 0;
+ u32 rd_len, rd_addr, rd_cnt;
+ u32 offset = 0;
+ u8 node_id;
+ u32 i;
+
+ if (!buf_in || !buf_out || in_size != sizeof(*up_log_msg) ||
+ *out_size != up_log_msg->rd_len || width >= OP_WIDTH_UNKNOWN)
+ return -EINVAL;
+
+ rd_len = up_log_msg->rd_len;
+ rd_addr = up_log_msg->addr;
+ node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type;
+
+ rd_cnt = rd_len / op_bytes;
+
+ /* Round up so a partial trailing unit is still read */
+ if (rd_len % op_bytes)
+ rd_cnt++;
+
+ for (i = 0; i < rd_cnt; i++) {
+ if (width == OP_WIDTH_4B)
+ ret = hinic_api_csr_rd32(hwdev, node_id,
+ rd_addr + offset,
+ (u32 *)(((u8 *)buf_out) +
+ offset));
+ else
+ ret = hinic_api_csr_rd64(hwdev, node_id,
+ rd_addr + offset,
+ (u64 *)(((u8 *)buf_out) +
+ offset));
+ if (ret) {
+ pr_err("Read csr failed, err: %d, node_id: %d, csr addr: 0x%08x\n",
+ ret, node_id, rd_addr + offset);
+ return ret;
+ }
+ offset += op_bytes;
+ }
+ *out_size = rd_len;
+
+ return ret;
+}
+
+/* Write rd_len bytes from the user pointer in buf_in to CSR space,
+ * in 4- or 8-byte units. The length must be a multiple of the unit
+ * size. Sets *out_size to 0 on success.
+ */
+static int api_csr_write(void *hwdev, struct msg_module *nt_msg, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size,
+ enum api_csr_op_width width)
+{
+ struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in;
+ u32 op_bytes = (width == OP_WIDTH_4B ? sizeof(u32) : sizeof(u64));
+ int ret = 0;
+ u32 rd_len, rd_addr, rd_cnt;
+ u32 offset = 0;
+ u8 node_id;
+ u32 i;
+ u8 *data = NULL;
+
+ if (!buf_in || in_size != sizeof(*csr_write_msg) ||
+ width >= OP_WIDTH_UNKNOWN)
+ return -EINVAL;
+
+ rd_len = csr_write_msg->rd_len;
+ rd_addr = csr_write_msg->addr;
+ node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type;
+
+ if (rd_len % op_bytes) {
+ pr_err("Csr length must be a multiple of %d\n", op_bytes);
+ return -EFAULT;
+ }
+
+ rd_cnt = rd_len / op_bytes;
+ data = kzalloc(rd_len, GFP_KERNEL);
+ if (!data) {
+ pr_err("No more memory\n");
+ return -EFAULT;
+ }
+ if (copy_from_user(data, (void *)csr_write_msg->data, rd_len)) {
+ pr_err("Copy information from user failed\n");
+ kfree(data);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < rd_cnt; i++) {
+ if (width == OP_WIDTH_4B)
+ ret = hinic_api_csr_wr32(hwdev, node_id,
+ rd_addr + offset,
+ *((u32 *)(data + offset)));
+ else
+ ret = hinic_api_csr_wr64(hwdev, node_id,
+ rd_addr + offset,
+ *((u64 *)(data + offset)));
+ if (ret) {
+ /* Arguments ordered to match the format string:
+ * ret, node_id, then the failing CSR address.
+ */
+ pr_err("Write csr failed, ret: %d, node_id: %d, csr addr: 0x%08x\n",
+ ret, node_id, rd_addr + offset);
+ kfree(data);
+ return ret;
+ }
+ offset += op_bytes;
+ }
+
+ *out_size = 0;
+ kfree(data);
+ return ret;
+}
+
+/* Pick the management-CPU completion timeout: firmware update gets
+ * the long timeout, everything else the default.
+ */
+static u32 get_up_timeout_val(enum hinic_mod_type mod, u8 cmd)
+{
+ if (mod == HINIC_MOD_L2NIC && cmd == HINIC_PORT_CMD_UPDATE_FW)
+ return UP_UPDATEFW_TIME_OUT_VAL;
+ else
+ return UP_COMP_TIME_OUT_VAL;
+}
+
+/* Bound-check the user-supplied CSR read/write length. */
+static int check_useparam_valid(struct msg_module *nt_msg, void *buf_in)
+{
+ struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in;
+ u32 rd_len = csr_write_msg->rd_len;
+
+ if (rd_len > TOOL_COUNTER_MAX_LEN) {
+ pr_err("Csr read or write len is invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Route a request to the management CPU: API_CMD/API_CLP go through
+ * the mgmt/CLP message channel; API_CHAIN commands perform direct
+ * CSR reads/writes in 4- or 8-byte widths.
+ */
+static int send_to_up(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
+{
+ int ret = 0;
+
+ if (nt_msg->up_cmd.up_db.up_api_type == API_CMD ||
+ nt_msg->up_cmd.up_db.up_api_type == API_CLP) {
+ enum hinic_mod_type mod;
+ u8 cmd;
+ u32 timeout;
+
+ mod = (enum hinic_mod_type)nt_msg->up_cmd.up_db.comm_mod_type;
+ cmd = nt_msg->up_cmd.up_db.chipif_cmd;
+
+ timeout = get_up_timeout_val(mod, cmd);
+
+ if (nt_msg->up_cmd.up_db.up_api_type == API_CMD)
+ ret = hinic_msg_to_mgmt_sync(hwdev, mod, cmd,
+ buf_in, (u16)in_size,
+ buf_out, (u16 *)out_size,
+ timeout);
+ else
+ ret = hinic_clp_to_mgmt(hwdev, mod, cmd,
+ buf_in, (u16)in_size,
+ buf_out, (u16 *)out_size);
+ if (ret) {
+ pr_err("Message to mgmt cpu return fail, mod: %d, cmd: %d\n",
+ mod, cmd);
+ return ret;
+ }
+
+ } else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) {
+ if (check_useparam_valid(nt_msg, buf_in))
+ return -EINVAL;
+
+ switch (nt_msg->up_cmd.up_db.chipif_cmd) {
+ case API_CSR_WRITE:
+ ret = api_csr_write(hwdev, nt_msg, buf_in, in_size,
+ buf_out, out_size, OP_WIDTH_4B);
+ break;
+ case API_CSR_READ:
+ ret = api_csr_read(hwdev, nt_msg, buf_in, in_size,
+ buf_out, out_size, OP_WIDTH_4B);
+ break;
+ case API_CSR_WRITE_8B:
+ ret = api_csr_write(hwdev, nt_msg, buf_in, in_size,
+ buf_out, out_size, OP_WIDTH_8B);
+ break;
+ case API_CSR_READ_8B:
+ ret = api_csr_read(hwdev, nt_msg, buf_in, in_size,
+ buf_out, out_size, OP_WIDTH_8B);
+ break;
+ default:
+ pr_err("Unsupported chipif cmd: %d\n",
+ nt_msg->up_cmd.up_db.chipif_cmd);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Read a 32-bit SM counter; on failure the value reported to the
+ * caller is forced to 0xffffffff.
+ */
+static int sm_rd32(void *hwdev, u32 id, u8 instance,
+ u8 node, struct sm_out_st *buf_out)
+{
+ u32 val1;
+ int ret;
+
+ ret = hinic_sm_ctr_rd32(hwdev, node, instance, id, &val1);
+ if (ret) {
+ pr_err("Get sm ctr information (32 bits)failed\n");
+ val1 = 0xffffffff;
+ }
+
+ buf_out->val1 = val1;
+
+ return ret;
+}
+
+/* Read a 64-bit SM counter pair; on failure val1 is forced to
+ * 0xffffffff (val2 stays 0).
+ */
+static int sm_rd64_pair(void *hwdev, u32 id, u8 instance,
+ u8 node, struct sm_out_st *buf_out)
+{
+ u64 val1 = 0, val2 = 0;
+ int ret;
+
+ ret = hinic_sm_ctr_rd64_pair(hwdev, node, instance, id, &val1, &val2);
+ if (ret) {
+ pr_err("Get sm ctr information (64 bits pair)failed\n");
+ val1 = 0xffffffff;
+ }
+
+ buf_out->val1 = val1;
+ buf_out->val2 = val2;
+
+ return ret;
+}
+
+/* Read a single 64-bit SM counter; on failure the value is forced
+ * to 0xffffffff.
+ */
+static int sm_rd64(void *hwdev, u32 id, u8 instance,
+ u8 node, struct sm_out_st *buf_out)
+{
+ u64 val1;
+ int ret;
+
+ ret = hinic_sm_ctr_rd64(hwdev, node, instance, id, &val1);
+ if (ret) {
+ pr_err("Get sm ctr information (64 bits)failed\n");
+ val1 = 0xffffffff;
+ }
+ buf_out->val1 = val1;
+
+ return ret;
+}
+
+/* Handler signature for SM counter read commands */
+typedef int (*sm_module)(void *hwdev, u32 id, u8 instance,
+ u8 node, struct sm_out_st *buf_out);
+
+struct sm_module_handle {
+ enum sm_cmd_type sm_cmd_name;
+ sm_module sm_func;
+};
+
+/* Dispatch table: SM command -> counter read handler */
+static struct sm_module_handle sm_module_cmd_handle[] = {
+ {SM_CTR_RD32, sm_rd32},
+ {SM_CTR_RD64_PAIR, sm_rd64_pair},
+ {SM_CTR_RD64, sm_rd64}
+};
+
+/* Dispatch an SM counter read to the matching handler and return the
+ * result in a struct sm_out_st.
+ */
+static int send_to_sm(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
+{
+ struct sm_in_st *sm_in = buf_in;
+ struct sm_out_st *sm_out = buf_out;
+ u32 msg_formate = nt_msg->msg_formate;
+ int index, num_cmds = sizeof(sm_module_cmd_handle) /
+ sizeof(sm_module_cmd_handle[0]);
+ int ret = 0;
+
+ if (!buf_in || !buf_out || in_size != sizeof(*sm_in) ||
+ *out_size != sizeof(*sm_out))
+ return -EINVAL;
+
+ /* NOTE(review): an unmatched msg_formate falls through with
+ * ret == 0, returning success without filling sm_out - confirm
+ * this is intended.
+ */
+ for (index = 0; index < num_cmds; index++) {
+ if (msg_formate == sm_module_cmd_handle[index].sm_cmd_name)
+ ret = sm_module_cmd_handle[index].sm_func(hwdev,
+ (u32)sm_in->id,
+ (u8)sm_in->instance,
+ (u8)sm_in->node, sm_out);
+ }
+
+ if (ret)
+ pr_err("Get sm information fail\n");
+
+ *out_size = sizeof(struct sm_out_st);
+
+ return ret;
+}
+
+/* Check whether the target device has the modules (MGMT/CLP/MBOX/
+ * CMDQ) required for the requested command class initialized.
+ */
+static bool is_hwdev_cmd_support(unsigned int mod,
+ char *ifname, u32 up_api_type)
+{
+ void *hwdev;
+
+ hwdev = hinic_get_hwdev_by_ifname(ifname);
+ if (!hwdev) {
+ pr_err("Can not get the device %s correctly\n", ifname);
+ return false;
+ }
+
+ switch (mod) {
+ case SEND_TO_UP:
+ case SEND_TO_SM:
+ if (FUNC_SUPPORT_MGMT(hwdev)) {
+ if (up_api_type == API_CLP) {
+ if (!hinic_is_hwdev_mod_inited
+ (hwdev, HINIC_HWDEV_CLP_INITED)) {
+ pr_err("CLP have not initialized\n");
+ return false;
+ }
+ } else if (!hinic_is_hwdev_mod_inited
+ (hwdev, HINIC_HWDEV_MGMT_INITED)) {
+ pr_err("MGMT have not initialized\n");
+ return false;
+ }
+ } else if (!hinic_is_hwdev_mod_inited
+ (hwdev, HINIC_HWDEV_MBOX_INITED)) {
+ pr_err("MBOX have not initialized\n");
+ return false;
+ }
+
+ /* SM reads additionally require a PF with MGMT up */
+ if (mod == SEND_TO_SM &&
+ ((hinic_func_type(hwdev) == TYPE_VF) ||
+ (!hinic_is_hwdev_mod_inited(hwdev,
+ HINIC_HWDEV_MGMT_INITED)))) {
+ pr_err("Current function do not support this cmd\n");
+ return false;
+ }
+ break;
+
+ case SEND_TO_UCODE:
+ if (!hinic_is_hwdev_mod_inited(hwdev,
+ HINIC_HWDEV_CMDQ_INITED)) {
+ pr_err("CMDQ have not initialized\n");
+ return false;
+ }
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/* Gate each ioctl module on the device's init state: NIC commands
+ * need the NIC driver up, hwdev-level commands need the hwdev up.
+ */
+static bool nictool_k_is_cmd_support(unsigned int mod,
+ char *ifname, u32 up_api_type)
+{
+ enum hinic_init_state init_state =
+ hinic_get_init_state_by_ifname(ifname);
+ bool support = true;
+
+ if (init_state == HINIC_INIT_STATE_NONE)
+ return false;
+
+ if (mod == SEND_TO_NIC_DRIVER) {
+ if (init_state < HINIC_INIT_STATE_NIC_INITED) {
+ pr_err("NIC driver have not initialized\n");
+ return false;
+ }
+ } else if (mod >= SEND_TO_UCODE && mod <= SEND_TO_SM) {
+ return is_hwdev_cmd_support(mod, ifname, up_api_type);
+ } else if ((mod >= HINICADM_OVS_DRIVER &&
+ mod <= HINICADM_FCOE_DRIVER) ||
+ mod == SEND_TO_HW_DRIVER) {
+ if (init_state < HINIC_INIT_STATE_HWDEV_INITED) {
+ pr_err("Hwdev have not initialized\n");
+ return false;
+ }
+ } else {
+ pr_err("Unsupport mod %d\n", mod);
+ support = false;
+ }
+
+ return support;
+}
+
+/* Allocate the kernel-side in/out staging buffers for one ioctl;
+ * on out-buffer failure the in-buffer is released again.
+ */
+static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size,
+ void **buf_in, u32 out_size, void **buf_out)
+{
+ int ret;
+
+ ret = alloc_buff_in(hwdev, nt_msg, in_size, buf_in);
+ if (ret) {
+ pr_err("Alloc tool cmd buff in failed\n");
+ return ret;
+ }
+
+ ret = alloc_buff_out(hwdev, nt_msg, out_size, buf_out);
+ if (ret) {
+ pr_err("Alloc tool cmd buff out failed\n");
+ goto out_free_buf_in;
+ }
+
+ return 0;
+
+out_free_buf_in:
+ free_buff_in(hwdev, nt_msg, *buf_in);
+
+ return ret;
+}
+
+/* Release both staging buffers allocated by alloc_tmp_buf(). */
+static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, void *buf_out)
+{
+ free_buff_out(hwdev, nt_msg, buf_out);
+ free_buff_in(hwdev, nt_msg, buf_in);
+}
+
+/* Fetch the self-test result for the named device and copy it back
+ * to user mode.
+ */
+static int get_self_test_cmd(struct msg_module *nt_msg)
+{
+ int ret;
+ u32 res = 0;
+
+ ret = hinic_get_self_test_result(nt_msg->device_name, &res);
+ if (ret) {
+ pr_err("Get self test result failed\n");
+ return -EFAULT;
+ }
+
+ ret = copy_buf_out_to_user(nt_msg, sizeof(res), &res);
+ if (ret)
+ pr_err("%s:%d:: Copy to user failed\n", __func__, __LINE__);
+
+ return ret;
+}
+
+/* Enumerate all chip ids and copy the list back to user mode. */
+static int get_all_chip_id_cmd(struct msg_module *nt_msg)
+{
+ struct nic_card_id card_id;
+
+ memset(&card_id, 0, sizeof(card_id));
+
+ hinic_get_all_chip_id((void *)&card_id);
+
+ if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) {
+ pr_err("Copy chip id to user failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Public ioctl entry for the NIC service: forwards to the NIC
+ * driver command dispatcher.
+ */
+int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ return send_to_nic_driver(uld_dev, cmd, buf_in,
+ in_size, buf_out, out_size);
+}
+
+/* Resolve the upper-layer device for a NIC command. QoS (cos_up)
+ * commands must be addressed by chip name ("hinic0"); everything
+ * else is looked up by interface name.
+ */
+static void *__get_dev_support_nic_cmd(struct msg_module *nt_msg,
+ enum hinic_service_type type)
+{
+ void *uld_dev = NULL;
+
+ /* set/get qos must use chip_name(hinic0) */
+ switch (nt_msg->msg_formate) {
+ case GET_COS_UP_MAP:
+ case SET_COS_UP_MAP:
+ case GET_NUM_COS:
+ uld_dev = hinic_get_uld_by_chip_name(nt_msg->device_name, type);
+ if (!uld_dev)
+ pr_err("Get/set cos_up must use chip_name(hinic0)\n");
+
+ return uld_dev;
+
+ default:
+ break;
+ }
+
+ uld_dev = hinic_get_uld_dev_by_ifname(nt_msg->device_name, type);
+ if (!uld_dev)
+ pr_err("Can not get the uld dev correctly: %s, nic driver may be not register\n",
+ nt_msg->device_name);
+
+ return uld_dev;
+}
+
+/* Resolve the upper-layer device for any service type, verifying
+ * that the function actually supports NIC when a NIC command is
+ * requested.
+ */
+static void *get_support_uld_dev(struct msg_module *nt_msg,
+ enum hinic_service_type type)
+{
+ char *service_name[SERVICE_T_MAX] = {"NIC", "OVS", "ROCE", "TOE",
+ "IWARP", "FC", "FCOE"};
+ void *hwdev = NULL;
+ void *uld_dev = NULL;
+
+ switch (nt_msg->module) {
+ case SEND_TO_NIC_DRIVER:
+ hwdev = hinic_get_hwdev_by_ifname(nt_msg->device_name);
+ if (!hinic_support_nic(hwdev, NULL)) {
+ pr_err("Current function don't support NIC\n");
+ return NULL;
+ }
+ return __get_dev_support_nic_cmd(nt_msg, type);
+ default:
+ break;
+ }
+
+ uld_dev = hinic_get_uld_dev_by_ifname(nt_msg->device_name, type);
+ if (!uld_dev)
+ pr_err("Can not get the uld dev correctly: %s, %s driver may be not register\n",
+ nt_msg->device_name, service_name[type]);
+
+ return uld_dev;
+}
+
+/* Ask a service driver (FC/TOE) for its version via its registered
+ * ioctl hook and copy the result to user mode.
+ */
+static int get_service_drv_version(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out,
+ u32 *out_size)
+{
+ enum hinic_service_type type;
+ int ret = 0;
+
+ type = nt_msg->module - SEND_TO_SM;
+ *out_size = sizeof(struct drv_version_info);
+
+ if (!g_uld_info[type].ioctl)
+ return ret;
+
+ ret = g_uld_info[type].ioctl(NULL, nt_msg->msg_formate, buf_in, in_size,
+ buf_out, out_size);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(nt_msg->out_buf, buf_out, *out_size))
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Forward a command to the matching service driver's registered
+ * ioctl hook; returns -EINVAL for unknown modules or missing devices.
+ */
+static int send_to_service_driver(struct msg_module *nt_msg, void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ enum hinic_service_type type;
+ void *uld_dev;
+ int ret = -EINVAL;
+
+ if (nt_msg->module == SEND_TO_NIC_DRIVER)
+ type = SERVICE_T_NIC;
+ else
+ type = nt_msg->module - SEND_TO_SM;
+
+ if (type < SERVICE_T_MAX) {
+ uld_dev = get_support_uld_dev(nt_msg, type);
+ if (!uld_dev)
+ return -EINVAL;
+
+ if (g_uld_info[type].ioctl)
+ ret = g_uld_info[type].ioctl(uld_dev,
+ nt_msg->msg_formate,
+ buf_in, in_size, buf_out,
+ out_size);
+ } else {
+ pr_err("Ioctl input module id: %d is incorrectly\n",
+ nt_msg->module);
+ }
+
+ return ret;
+}
+
+/* Top-level command router: hand the request to the HW driver, the
+ * management CPU, the microcode, the SM, or a service driver based
+ * on nt_msg->module.
+ */
+static int nictool_exec_cmd(void *hwdev, struct msg_module *nt_msg,
+ void *buf_in, u32 in_size, void *buf_out,
+ u32 *out_size)
+{
+ int ret;
+
+ switch (nt_msg->module) {
+ case SEND_TO_HW_DRIVER:
+ ret = send_to_hw_driver(hwdev, nt_msg, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ case SEND_TO_UP:
+ ret = send_to_up(hwdev, nt_msg, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ case SEND_TO_UCODE:
+ ret = send_to_ucode(hwdev, nt_msg, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ case SEND_TO_SM:
+ ret = send_to_sm(hwdev, nt_msg, buf_in,
+ in_size, buf_out, out_size);
+ break;
+ default:
+ ret = send_to_service_driver(nt_msg, buf_in, in_size, buf_out,
+ out_size);
+ break;
+ }
+
+ return ret;
+}
+
+/* Report the tool capability bitmap (currently only API_CSR) to
+ * user mode.
+ */
+static int get_nictool_drv_cap(struct msg_module *nt_msg)
+{
+ int ret;
+ u64 support = 0;
+
+ if (nt_msg->len_info.out_buff_len != sizeof(u64)) {
+ pr_err("Unexpect out buf size from user: %d, expect: %lu\n",
+ nt_msg->len_info.out_buff_len, sizeof(u64));
+ return -EINVAL;
+ }
+
+ support |= NICTOOL_SUPPORT_API_CSR;
+
+ ret = copy_buf_out_to_user(nt_msg, sizeof(support), &support);
+ if (ret)
+ pr_err("%s:%d:: Copy to user failed\n", __func__, __LINE__);
+
+ return ret;
+}
+
+/* Handle HW-driver commands that do their own user-copying and need
+ * no hwdev lookup or staging buffers. Returns true when the command
+ * was handled here (result in *ret).
+ */
+static bool hinic_is_special_handling_cmd(struct msg_module *nt_msg, int *ret)
+{
+ bool handled = true;
+
+ if (nt_msg->module != SEND_TO_HW_DRIVER)
+ return false;
+
+ switch (nt_msg->msg_formate) {
+ case GET_SELF_TEST_RES:
+ *ret = get_self_test_cmd(nt_msg);
+ break;
+ case GET_CHIP_ID:
+ *ret = get_all_chip_id_cmd(nt_msg);
+ break;
+ case GET_PF_DEV_INFO:
+ *ret = get_pf_dev_info(nt_msg->device_name, nt_msg);
+ break;
+ case CMD_FREE_MEM:
+ *ret = knl_free_mem(nt_msg->device_name, nt_msg);
+ break;
+ case GET_CHIP_INFO:
+ *ret = get_card_func_info(nt_msg->device_name, nt_msg);
+ break;
+ case GET_NICTOOL_CAP:
+ *ret = get_nictool_drv_cap(nt_msg);
+ break;
+ default:
+ handled = false;
+ break;
+ }
+
+ return handled;
+}
+
+/* Main ioctl entry of the nictool char device: copy the request
+ * header from user space, validate the command against the device's
+ * init state, stage in/out buffers, dispatch, and copy the result
+ * back. The tool reference count brackets the whole operation.
+ */
+static long nictool_k_unlocked_ioctl(struct file *pfile,
+ unsigned int cmd, unsigned long arg)
+{
+ void *hwdev;
+ struct msg_module nt_msg;
+ void *buf_out = NULL;
+ void *buf_in = NULL;
+ u32 out_size_expect = 0;
+ u32 out_size = 0;
+ u32 in_size = 0;
+ unsigned int cmd_raw = 0;
+ int ret = 0;
+
+ memset(&nt_msg, 0, sizeof(nt_msg));
+
+ if (copy_from_user(&nt_msg, (void *)arg, sizeof(nt_msg))) {
+ pr_err("Copy information from user failed\n");
+ return -EFAULT;
+ }
+
+ /* end with '\0' */
+ nt_msg.device_name[IFNAMSIZ - 1] = '\0';
+
+ cmd_raw = nt_msg.module;
+
+ out_size_expect = nt_msg.len_info.out_buff_len;
+ in_size = nt_msg.len_info.in_buff_len;
+
+ hinic_tool_cnt_inc();
+
+ /* Commands that copy to user themselves bypass the common path */
+ if (hinic_is_special_handling_cmd(&nt_msg, &ret))
+ goto out_free_lock;
+
+ if (cmd_raw == HINICADM_FC_DRIVER &&
+ nt_msg.msg_formate == GET_CHIP_ID)
+ hinic_get_fc_devname(nt_msg.device_name);
+
+ if (!nictool_k_is_cmd_support(cmd_raw, nt_msg.device_name,
+ nt_msg.up_cmd.up_db.up_api_type)) {
+ ret = -EFAULT;
+ goto out_free_lock;
+ }
+
+ /* get the netdevice */
+ hwdev = hinic_get_hwdev_by_ifname(nt_msg.device_name);
+ if (!hwdev) {
+ pr_err("Can not get the device %s correctly\n",
+ nt_msg.device_name);
+ ret = -ENODEV;
+ goto out_free_lock;
+ }
+
+ ret = alloc_tmp_buf(hwdev, &nt_msg, in_size,
+ &buf_in, out_size_expect, &buf_out);
+ if (ret) {
+ pr_err("Alloc tmp buff failed\n");
+ goto out_free_lock;
+ }
+
+ out_size = out_size_expect;
+
+ if (nt_msg.msg_formate == GET_DRV_VERSION &&
+ (cmd_raw == HINICADM_FC_DRIVER || cmd_raw == HINICADM_TOE_DRIVER)) {
+ ret = get_service_drv_version(hwdev, &nt_msg, buf_in,
+ in_size, buf_out, &out_size);
+ goto out_free_buf;
+ }
+
+ ret = nictool_exec_cmd(hwdev, &nt_msg, buf_in,
+ in_size, buf_out, &out_size);
+ if (ret)
+ goto out_free_buf;
+
+ ret = copy_buf_out_to_user(&nt_msg, out_size_expect, buf_out);
+ if (ret)
+ pr_err("Copy information to user failed\n");
+
+out_free_buf:
+ free_tmp_buf(hwdev, &nt_msg, buf_in, buf_out);
+
+out_free_lock:
+ hinic_tool_cnt_dec();
+
+ return (long)ret;
+}
+
+/* open() stub: the device needs no per-open state. */
+static int nictool_k_open(struct inode *pnode, struct file *pfile)
+{
+ return 0;
+}
+
+/* read() stub: all data exchange happens through ioctl/mmap. */
+static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf,
+ size_t size, loff_t *ppos)
+{
+ return 0;
+}
+
+/* write() stub: all data exchange happens through ioctl/mmap. */
+static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf,
+ size_t size, loff_t *ppos)
+{
+ return 0;
+}
+
+/* Char-device operations for the nictool control node */
+static const struct file_operations fifo_operations = {
+ .owner = THIS_MODULE,
+ .open = nictool_k_open,
+ .read = nictool_k_read,
+ .write = nictool_k_write,
+ .unlocked_ioctl = nictool_k_unlocked_ioctl,
+ .mmap = hinic_mem_mmap,
+};
+
+/* Probe whether the nictool device node already exists (e.g. created
+ * by another driver instance) by trying to open it.
+ */
+static int if_nictool_exist(void)
+{
+ struct file *fp = NULL;
+ int exist = 0;
+
+ fp = filp_open(HIADM_DEV_PATH, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ exist = 0;
+ } else {
+ (void)filp_close(fp, NULL);
+ exist = 1;
+ }
+
+ return exist;
+}
+
+/**
+ * hinic_tool_k_init - initialize the hw interface
+ *
+ * Creates the nictool character device (chrdev region, class, cdev,
+ * device node) once; subsequent calls only bump the reference count.
+ * Returns 0 on success or a negative errno, unwinding partially
+ * created objects on failure.
+ */
+int hinic_tool_k_init(void)
+{
+ int ret;
+ struct device *pdevice;
+
+ if (g_nictool_init_flag) {
+ g_nictool_ref_cnt++;
+ /* already initialized */
+ return 0;
+ }
+
+ /* NOTE(review): when the node already exists this returns 0
+ * without setting g_nictool_init_flag or the ref count -
+ * confirm uninit pairing is still balanced in that case.
+ */
+ if (if_nictool_exist()) {
+ pr_err("Nictool device exists\n");
+ return 0;
+ }
+
+ ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME);
+ if (ret < 0) {
+ pr_err("Register nictool_dev fail(0x%x)\n", ret);
+ return ret;
+ }
+
+ /* Create equipment */
+ /*lint -save -e160*/
+ g_nictool_class = class_create(HIADM_DEV_CLASS);
+ /*lint -restore*/
+ if (IS_ERR(g_nictool_class)) {
+ pr_err("Create nictool_class fail\n");
+ ret = -EFAULT;
+ goto class_create_err;
+ }
+
+ /* Initializing the character device */
+ cdev_init(&g_nictool_cdev, &fifo_operations);
+
+ /* Add devices to the operating system */
+ ret = cdev_add(&g_nictool_cdev, g_dev_id, 1);
+ if (ret < 0) {
+ pr_err("Add nictool_dev to operating system fail(0x%x)\n", ret);
+ goto cdev_add_err;
+ }
+
+ /* Export device information to user space
+ * (/sys/class/class name/device name)
+ */
+ pdevice = device_create(g_nictool_class, NULL,
+ g_dev_id, NULL, HIADM_DEV_NAME);
+ if (IS_ERR(pdevice)) {
+ pr_err("Export nictool device information to user space fail\n");
+ ret = -EFAULT;
+ goto device_create_err;
+ }
+
+ g_nictool_init_flag = 1;
+ g_nictool_ref_cnt = 1;
+
+ pr_info("Register nictool_dev to system succeed\n");
+
+ return 0;
+
+device_create_err:
+ cdev_del(&g_nictool_cdev);
+
+cdev_add_err:
+ class_destroy(g_nictool_class);
+
+class_create_err:
+ g_nictool_class = NULL;
+ unregister_chrdev_region(g_dev_id, 1);
+
+ return ret;
+}
+
+/* Drop one reference to the nictool device; tear down the cdev,
+ * device node, class and chrdev region when the last user goes away.
+ */
+void hinic_tool_k_uninit(void)
+{
+ if (g_nictool_init_flag) {
+ if ((--g_nictool_ref_cnt))
+ return;
+ }
+
+ g_nictool_init_flag = 0;
+
+ /* Nothing to destroy if the class was never (fully) created */
+ if (!g_nictool_class || IS_ERR(g_nictool_class))
+ return;
+
+ cdev_del(&g_nictool_cdev);
+ device_destroy(g_nictool_class, g_dev_id);
+ class_destroy(g_nictool_class);
+ g_nictool_class = NULL;
+
+ unregister_chrdev_region(g_dev_id, 1);
+
+ pr_info("Unregister nictool_dev succeed\n");
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
new file mode 100644
index 000000000000..4d2fd351b99e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_NICTOOL_H_
+#define HINIC_NICTOOL_H_
+
+#include "hinic_dfx_def.h"
+/* completion timeout interval, unit is jiffies */
+#define UP_COMP_TIME_OUT_VAL 10000U
+
+struct sm_in_st {
+ int node;
+ int id;
+ int instance;
+};
+
+struct sm_out_st {
+ u64 val1;
+ u64 val2;
+};
+
+struct up_log_msg_st {
+ u32 rd_len;
+ u32 addr;
+};
+
+struct csr_write_st {
+ u32 rd_len;
+ u32 addr;
+ u8 *data;
+};
+
+struct ipsurx_stats_info {
+ u32 addr;
+ u32 rd_cnt;
+};
+
+struct ucode_cmd_st { /* microcode command word: bitfield layout overlaid on a raw u32 */
+ union {
+ struct {
+ u32 comm_mod_type : 8; /* presumably the destination module id — confirm against cmdq users */
+ u32 ucode_cmd_type : 4;
+ u32 cmdq_ack_type : 3;
+ u32 ucode_imm : 1;
+ u32 len : 16;
+ } ucode_db;
+ u32 value; /* whole-word access to the same 32 bits */
+ };
+};
+
+struct up_cmd_st { /* up-channel command word: bitfield layout overlaid on a raw u32 */
+ union {
+ struct {
+ u32 comm_mod_type : 8;
+ u32 chipif_cmd : 8;
+ u32 up_api_type : 16;
+ } up_db;
+ u32 value; /* whole-word access to the same 32 bits */
+ };
+};
+
+struct _dcb_data {
+ u8 wr_flag;
+ u8 dcb_en;
+ u8 err;
+ u8 rsvd;
+};
+
+union _dcb_ctl {
+ struct _dcb_data dcb_data;
+ u32 data;
+};
+
+struct _pfc_data {
+ u8 pfc_en;
+ u8 pfc_priority;
+ u8 num_of_tc;
+ u8 err;
+};
+
+union _pfc {
+ struct _pfc_data pfc_data;
+ u32 data;
+};
+
+union _flag_com {
+ struct _ets_flag {
+ u8 flag_ets_enable : 1;
+ u8 flag_ets_percent : 1;
+ u8 flag_ets_cos : 1;
+ u8 flag_ets_strict : 1;
+ u8 rev : 4;
+ } ets_flag;
+ u8 data;
+};
+
+struct _ets {
+ u8 ets_en;
+ u8 err;
+ u8 strict;
+ u8 tc[8];
+ u8 ets_percent[8];
+ union _flag_com flag_com;
+};
+
+#define API_CMD 0x1
+#define API_CHAIN 0x2
+#define API_CLP 0x3
+
+struct msg_module { /* request envelope for the nictool interface — assumed user-space ioctl payload, TODO confirm against caller */
+ char device_name[IFNAMSIZ]; /* target interface name, IFNAMSIZ-bounded */
+ unsigned int module;
+ union {
+ u32 msg_formate;
+ struct ucode_cmd_st ucode_cmd;
+ struct up_cmd_st up_cmd;
+ };
+
+ struct {
+ u32 in_buff_len; /* bytes available at in_buff */
+ u32 out_buff_len; /* bytes available at out_buf */
+ } len_info;
+ u32 res;
+ void *in_buff; /* input payload pointer */
+ void *out_buf; /* output payload pointer */
+};
+
+#define MAX_VER_INFO_LEN 128
+struct drv_version_info {
+ char ver[MAX_VER_INFO_LEN];
+};
+
+struct chip_fault_stats {
+ int offset;
+ u8 chip_faults[MAX_DRV_BUF_SIZE];
+};
+
+struct hinic_wqe_info {
+ int q_id;
+ void *slq_handle;
+ unsigned int wqe_id;
+};
+
+struct hinic_cos_up_map {
+ u8 cos_up[HINIC_DCB_UP_MAX];
+ u8 num_cos;
+};
+
+struct hinic_tx_hw_page {
+ u64 phy_addr;
+ u64 *map_addr;
+};
+
+struct hinic_dbg_sq_info {
+ u16 q_id;
+ u16 pi;
+ u16 ci; /* sw_ci */
+ u16 fi; /* hw_ci */
+
+ u32 q_depth;
+ u16 pi_reverse;
+ u16 weqbb_size;
+
+ u8 priority;
+ u16 *ci_addr;
+ u64 cla_addr;
+
+ void *slq_handle;
+
+ struct hinic_tx_hw_page direct_wqe;
+ struct hinic_tx_hw_page db_addr;
+ u32 pg_idx;
+
+ u32 glb_sq_id;
+};
+
+struct hinic_dbg_rq_info {
+ u16 q_id;
+ u16 glb_rq_id;
+ u16 hw_pi;
+ u16 ci; /* sw_ci */
+ u16 sw_pi;
+ u16 wqebb_size;
+ u16 q_depth;
+ u16 buf_len;
+
+ void *slq_handle;
+ u64 ci_wqe_page_addr;
+ u64 ci_cla_tbl_addr;
+
+ u16 msix_idx;
+ u32 msix_vector;
+};
+
+#define BUSINFO_LEN 32
+struct pf_info {
+ char name[IFNAMSIZ];
+ char bus_info[BUSINFO_LEN];
+ u32 pf_type;
+};
+
+#define MAX_SIZE 16
+struct card_info {
+ struct pf_info pf[MAX_SIZE];
+ u32 pf_num;
+};
+
+struct nic_card_id {
+ u32 id[MAX_SIZE];
+ u32 num;
+};
+
+struct func_pdev_info {
+ u64 bar0_phy_addr;
+ u64 bar0_size;
+ u64 rsvd1[4];
+};
+
+struct hinic_card_func_info {
+ u32 num_pf;
+ u32 rsvd0;
+ u64 usr_api_phy_addr;
+ struct func_pdev_info pdev_info[MAX_SIZE];
+};
+
+#define MAX_CARD_NUM 64
+extern void *g_hinic_card_node_array[MAX_CARD_NUM];
+extern void *g_hinic_card_vir_addr[MAX_CARD_NUM];
+extern u64 g_hinic_card_phy_addr[MAX_CARD_NUM];
+extern struct mutex g_hinic_addr_lock;
+extern int g_hinic_card_id;
+
+struct hinic_nic_loop_mode {
+ u32 loop_mode;
+ u32 loop_ctrl;
+};
+
+struct hinic_nic_poll_weight {
+ int poll_weight;
+};
+
+enum hinic_homologues_state {
+ HINIC_HOMOLOGUES_OFF = 0,
+ HINIC_HOMOLOGUES_ON = 1,
+};
+
+struct hinic_homologues {
+ enum hinic_homologues_state homo_state;
+};
+
+struct hinic_pf_info {
+ u32 isvalid;
+ u32 pf_id;
+};
+
+int hinic_tool_k_init(void);
+void hinic_tool_k_uninit(void);
+
+u32 hinic_get_io_stats_size(struct hinic_nic_dev *nic_dev);
+void hinic_get_io_stats(struct hinic_nic_dev *nic_dev,
+ struct hinic_show_item *items);
+
+#define TOOL_COUNTER_MAX_LEN 512
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h
new file mode 100644
index 000000000000..d225e543f68a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_PCI_ID_TBL_H
+#define HINIC_PCI_ID_TBL_H
+
+#define PCI_VENDOR_ID_HUAWEI 0x19e5
+#define HINIC_DEV_ID_1822_PF 0x1822
+#define HINIC_DEV_ID_1822_VF 0x375E
+#define HINIC_DEV_ID_1822_VF_HV 0x379E
+#define HINIC_DEV_ID_1822_SMTIO 0x020B
+#define HINIC_DEV_ID_1822_PANGEA_100GE 0x0208
+#define HINIC_DEV_ID_1822_PANGEA_TP_10GE 0x0204
+#define HINIC_DEV_ID_1822_KR_40GE 0x020D
+#define HINIC_DEV_ID_1822_KR_100GE 0x0205
+#define HINIC_DEV_ID_1822_DUAL_25GE 0x0206
+#define HINIC_DEV_ID_1822_KR_25GE 0x0210
+#define HINIC_DEV_ID_1822_MULTI_HOST 0x0211
+#define HINIC_DEV_ID_1822_100GE 0x0200
+#define HINIC_DEV_ID_1822_100GE_MULTI_HOST 0x0201
+
+#define HIFC_DEV_ID_1822_8G 0x0212
+#define HIFC_DEV_ID_1822_16G 0x0203
+#define HIFC_DEV_ID_1822_32G 0x0202
+
+#define HIFC_DEV_ID_1822_SMTIO 0x020C
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
deleted file mode 100644
index f81a43d2cdfc..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ /dev/null
@@ -1,1412 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_dev.h"
-#include "hinic_port.h"
-#include "hinic_dev.h"
-
-enum mac_op {
- MAC_DEL,
- MAC_SET,
-};
-
-/**
- * change_mac - change(add or delete) mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number to set with the mac
- * @op: add or delete the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id, enum mac_op op)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mac_cmd port_mac_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- u16 out_size = sizeof(port_mac_cmd);
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_port_cmd cmd;
- int err;
-
- if (op == MAC_SET)
- cmd = HINIC_PORT_CMD_SET_MAC;
- else
- cmd = HINIC_PORT_CMD_DEL_MAC;
-
- port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- port_mac_cmd.vlan_id = vlan_id;
- memcpy(port_mac_cmd.mac, addr, ETH_ALEN);
-
- err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd,
- sizeof(port_mac_cmd),
- &port_mac_cmd, &out_size);
- if (err || out_size != sizeof(port_mac_cmd) ||
- (port_mac_cmd.status &&
- (port_mac_cmd.status != HINIC_PF_SET_VF_ALREADY || !HINIC_IS_VF(hwif)) &&
- port_mac_cmd.status != HINIC_MGMT_STATUS_EXIST)) {
- dev_err(&pdev->dev, "Failed to change MAC, err: %d, status: 0x%x, out size: 0x%x\n",
- err, port_mac_cmd.status, out_size);
- return -EFAULT;
- }
-
- if (port_mac_cmd.status == HINIC_PF_SET_VF_ALREADY) {
- dev_warn(&pdev->dev, "PF has already set VF mac, ignore %s operation\n",
- (op == MAC_SET) ? "set" : "del");
- return HINIC_PF_SET_VF_ALREADY;
- }
-
- if (cmd == HINIC_PORT_CMD_SET_MAC && port_mac_cmd.status ==
- HINIC_MGMT_STATUS_EXIST)
- dev_warn(&pdev->dev, "MAC is repeated, ignore set operation\n");
-
- return 0;
-}
-
-/**
- * hinic_port_add_mac - add mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number to set with the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_add_mac(struct hinic_dev *nic_dev,
- const u8 *addr, u16 vlan_id)
-{
- return change_mac(nic_dev, addr, vlan_id, MAC_SET);
-}
-
-/**
- * hinic_port_del_mac - remove mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number that is connected to the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id)
-{
- return change_mac(nic_dev, addr, vlan_id, MAC_DEL);
-}
-
-/**
- * hinic_port_get_mac - get the mac address of the nic device
- * @nic_dev: nic device
- * @addr: returned mac address
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mac_cmd port_mac_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- u16 out_size = sizeof(port_mac_cmd);
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC,
- &port_mac_cmd, sizeof(port_mac_cmd),
- &port_mac_cmd, &out_size);
- if (err || out_size != sizeof(port_mac_cmd) || port_mac_cmd.status) {
- dev_err(&pdev->dev, "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n",
- err, port_mac_cmd.status, out_size);
- return -EFAULT;
- }
-
- memcpy(addr, port_mac_cmd.mac, ETH_ALEN);
- return 0;
-}
-
-/**
- * hinic_port_set_mtu - set mtu
- * @nic_dev: nic device
- * @new_mtu: new mtu
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mtu_cmd port_mtu_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- u16 out_size = sizeof(port_mtu_cmd);
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- port_mtu_cmd.mtu = new_mtu;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU,
- &port_mtu_cmd, sizeof(port_mtu_cmd),
- &port_mtu_cmd, &out_size);
- if (err || out_size != sizeof(port_mtu_cmd) || port_mtu_cmd.status) {
- dev_err(&pdev->dev, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x\n",
- err, port_mtu_cmd.status, out_size);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_add_vlan - add vlan to the nic device
- * @nic_dev: nic device
- * @vlan_id: the vlan number to add
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_vlan_cmd port_vlan_cmd;
-
- port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- port_vlan_cmd.vlan_id = vlan_id;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ADD_VLAN,
- &port_vlan_cmd, sizeof(port_vlan_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_del_vlan - delete vlan from the nic device
- * @nic_dev: nic device
- * @vlan_id: the vlan number to delete
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_vlan_cmd port_vlan_cmd;
-
- port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- port_vlan_cmd.vlan_id = vlan_id;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_DEL_VLAN,
- &port_vlan_cmd, sizeof(port_vlan_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_set_rx_mode - set rx mode in the nic device
- * @nic_dev: nic device
- * @rx_mode: the rx mode to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_rx_mode_cmd rx_mode_cmd;
-
- rx_mode_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- rx_mode_cmd.rx_mode = rx_mode;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_MODE,
- &rx_mode_cmd, sizeof(rx_mode_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_link_state - get the link state
- * @nic_dev: nic device
- * @link_state: the returned link state
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_link_state(struct hinic_dev *nic_dev,
- enum hinic_port_link_state *link_state)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_port_link_cmd link_cmd;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(link_cmd);
- int err;
-
- link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
- &link_cmd, sizeof(link_cmd),
- &link_cmd, &out_size);
- if (err || out_size != sizeof(link_cmd) || link_cmd.status) {
- dev_err(&pdev->dev, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n",
- err, link_cmd.status, out_size);
- return -EINVAL;
- }
-
- *link_state = link_cmd.state;
- return 0;
-}
-
-/**
- * hinic_port_set_state - set port state
- * @nic_dev: nic device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_state_cmd port_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(port_state);
- int err;
-
- if (HINIC_IS_VF(hwdev->hwif))
- return 0;
-
- port_state.state = state;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE,
- &port_state, sizeof(port_state),
- &port_state, &out_size);
- if (err || out_size != sizeof(port_state) || port_state.status) {
- dev_err(&pdev->dev, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x\n",
- err, port_state.status, out_size);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_set_func_state- set func device state
- * @nic_dev: nic device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_func_state(struct hinic_dev *nic_dev,
- enum hinic_func_port_state state)
-{
- struct hinic_port_func_state_cmd func_state;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(func_state);
- int err;
-
- func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- func_state.state = state;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE,
- &func_state, sizeof(func_state),
- &func_state, &out_size);
- if (err || out_size != sizeof(func_state) || func_state.status) {
- dev_err(&pdev->dev, "Failed to set port func state, err: %d, status: 0x%x, out size: 0x%x\n",
- err, func_state.status, out_size);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_get_cap - get port capabilities
- * @nic_dev: nic device
- * @port_cap: returned port capabilities
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_get_cap(struct hinic_dev *nic_dev,
- struct hinic_port_cap *port_cap)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(*port_cap);
- int err;
-
- port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP,
- port_cap, sizeof(*port_cap),
- port_cap, &out_size);
- if (err || out_size != sizeof(*port_cap) || port_cap->status) {
- dev_err(&pdev->dev,
- "Failed to get port capabilities, err: %d, status: 0x%x, out size: 0x%x\n",
- err, port_cap->status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_set_tso - set port tso configuration
- * @nic_dev: nic device
- * @state: the tso state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_tso_config tso_cfg = {0};
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(tso_cfg);
- int err;
-
- tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- tso_cfg.tso_en = state;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_TSO,
- &tso_cfg, sizeof(tso_cfg),
- &tso_cfg, &out_size);
- if (err || out_size != sizeof(tso_cfg) || tso_cfg.status) {
- dev_err(&pdev->dev,
- "Failed to set port tso, err: %d, status: 0x%x, out size: 0x%x\n",
- err, tso_cfg.status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
-{
- struct hinic_checksum_offload rx_csum_cfg = {0};
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- u16 out_size = sizeof(rx_csum_cfg);
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- int err;
-
- if (!hwdev)
- return -EINVAL;
-
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
- rx_csum_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- rx_csum_cfg.rx_csum_offload = en;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,
- &rx_csum_cfg, sizeof(rx_csum_cfg),
- &rx_csum_cfg, &out_size);
- if (err || !out_size || rx_csum_cfg.status) {
- dev_err(&pdev->dev,
- "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x\n",
- err, rx_csum_cfg.status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_vlan_cfg vlan_cfg;
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- u16 out_size;
- int err;
-
- if (!hwdev)
- return -EINVAL;
-
- out_size = sizeof(vlan_cfg);
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
- vlan_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- vlan_cfg.vlan_rx_offload = en;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD,
- &vlan_cfg, sizeof(vlan_cfg),
- &vlan_cfg, &out_size);
- if (err || !out_size || vlan_cfg.status) {
- dev_err(&pdev->dev,
- "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n",
- err, vlan_cfg.status, out_size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_vlan_filter vlan_filter;
- u16 out_size = sizeof(vlan_filter);
- int err;
-
- vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- vlan_filter.enable = en;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER,
- &vlan_filter, sizeof(vlan_filter),
- &vlan_filter, &out_size);
- if (vlan_filter.status == HINIC_MGMT_CMD_UNSUPPORTED) {
- err = HINIC_MGMT_CMD_UNSUPPORTED;
- } else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
- HINIC_IS_VF(hwif)) {
- err = HINIC_MGMT_CMD_UNSUPPORTED;
- } else if (err || !out_size || vlan_filter.status) {
- dev_err(&pdev->dev,
- "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n",
- err, vlan_filter.status, out_size);
- err = -EINVAL;
- }
-
- return err;
-}
-
-int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_rq_num rq_num = { 0 };
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(rq_num);
- int err;
-
- rq_num.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- rq_num.num_rqs = num_rqs;
- rq_num.rq_depth = ilog2(nic_dev->rq_depth);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP,
- &rq_num, sizeof(rq_num),
- &rq_num, &out_size);
- if (err || !out_size || rq_num.status) {
- dev_err(&pdev->dev,
- "Failed to set rxq number, err: %d, status: 0x%x, out size: 0x%x\n",
- err, rq_num.status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en,
- u8 max_wqe_num)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_lro_config lro_cfg = { 0 };
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(lro_cfg);
- int err;
-
- lro_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- lro_cfg.lro_ipv4_en = ipv4_en;
- lro_cfg.lro_ipv6_en = ipv6_en;
- lro_cfg.lro_max_wqe_num = max_wqe_num;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO,
- &lro_cfg, sizeof(lro_cfg),
- &lro_cfg, &out_size);
- if (err || !out_size || lro_cfg.status) {
- dev_err(&pdev->dev,
- "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n",
- err, lro_cfg.status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-static int hinic_set_rx_lro_timer(struct hinic_dev *nic_dev, u32 timer_value)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_lro_timer lro_timer = { 0 };
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(lro_timer);
- int err;
-
- lro_timer.status = 0;
- lro_timer.type = 0;
- lro_timer.enable = 1;
- lro_timer.timer = timer_value;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO_TIMER,
- &lro_timer, sizeof(lro_timer),
- &lro_timer, &out_size);
- if (lro_timer.status == 0xFF) {
- /* For this case, we think status (0xFF) is OK */
- lro_timer.status = 0;
- dev_dbg(&pdev->dev,
- "Set lro timer not supported by the current FW version, it will be 1ms default\n");
- }
-
- if (err || !out_size || lro_timer.status) {
- dev_err(&pdev->dev,
- "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n",
- err, lro_timer.status, out_size);
-
- return -EIO;
- }
-
- return 0;
-}
-
-int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
- u32 lro_timer, u32 wqe_num)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- u8 ipv4_en;
- u8 ipv6_en;
- int err;
-
- if (!hwdev)
- return -EINVAL;
-
- ipv4_en = lro_en ? 1 : 0;
- ipv6_en = lro_en ? 1 : 0;
-
- err = hinic_set_rx_lro(nic_dev, ipv4_en, ipv6_en, (u8)wqe_num);
- if (err)
- return err;
-
- if (HINIC_IS_VF(nic_dev->hwdev->hwif))
- return 0;
-
- err = hinic_set_rx_lro_timer(nic_dev, lro_timer);
- if (err)
- return err;
-
- return 0;
-}
-
-int hinic_rss_set_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
- const u32 *indir_table)
-{
- struct hinic_rss_indirect_tbl *indir_tbl;
- struct hinic_func_to_io *func_to_io;
- struct hinic_cmdq_buf cmd_buf;
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- u32 indir_size;
- u64 out_param;
- int err, i;
- u32 *temp;
-
- hwdev = nic_dev->hwdev;
- func_to_io = &hwdev->func_to_io;
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
- return err;
- }
-
- cmd_buf.size = sizeof(*indir_tbl);
-
- indir_tbl = cmd_buf.buf;
- indir_tbl->group_index = cpu_to_be32(tmpl_idx);
-
- for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
- indir_tbl->entry[i] = indir_table[i];
-
- if (0x3 == (i & 0x3)) {
- temp = (u32 *)&indir_tbl->entry[i - 3];
- *temp = cpu_to_be32(*temp);
- }
- }
-
- /* cfg the rss indirect table by command queue */
- indir_size = HINIC_RSS_INDIR_SIZE / 2;
- indir_tbl->offset = 0;
- indir_tbl->size = cpu_to_be32(indir_size);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
- &cmd_buf, &out_param);
- if (err || out_param != 0) {
- dev_err(&pdev->dev, "Failed to set rss indir table\n");
- err = -EFAULT;
- goto free_buf;
- }
-
- indir_tbl->offset = cpu_to_be32(indir_size);
- indir_tbl->size = cpu_to_be32(indir_size);
- memcpy(&indir_tbl->entry[0], &indir_tbl->entry[indir_size], indir_size);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
- &cmd_buf, &out_param);
- if (err || out_param != 0) {
- dev_err(&pdev->dev, "Failed to set rss indir table\n");
- err = -EFAULT;
- }
-
-free_buf:
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
-
- return err;
-}
-
-int hinic_rss_get_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
- u32 *indir_table)
-{
- struct hinic_rss_indir_table rss_cfg = { 0 };
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(rss_cfg);
- int err = 0, i;
-
- rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- rss_cfg.template_id = tmpl_idx;
-
- err = hinic_port_msg_cmd(hwdev,
- HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
- &rss_cfg, sizeof(rss_cfg), &rss_cfg,
- &out_size);
- if (err || !out_size || rss_cfg.status) {
- dev_err(&pdev->dev, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x\n",
- err, rss_cfg.status, out_size);
- return -EINVAL;
- }
-
- hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE);
- for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
- indir_table[i] = rss_cfg.indir[i];
-
- return 0;
-}
-
-int hinic_set_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
- struct hinic_rss_type rss_type)
-{
- struct hinic_rss_context_tbl *ctx_tbl;
- struct hinic_func_to_io *func_to_io;
- struct hinic_cmdq_buf cmd_buf;
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- u64 out_param;
- u32 ctx = 0;
- int err;
-
- hwdev = nic_dev->hwdev;
- func_to_io = &hwdev->func_to_io;
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmd buf\n");
- return -ENOMEM;
- }
-
- ctx |= HINIC_RSS_TYPE_SET(1, VALID) |
- HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
- HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
- HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
- HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
- HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
- HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
- HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
- HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
-
- cmd_buf.size = sizeof(struct hinic_rss_context_tbl);
-
- ctx_tbl = (struct hinic_rss_context_tbl *)cmd_buf.buf;
- ctx_tbl->group_index = cpu_to_be32(tmpl_idx);
- ctx_tbl->offset = 0;
- ctx_tbl->size = sizeof(u32);
- ctx_tbl->size = cpu_to_be32(ctx_tbl->size);
- ctx_tbl->rsvd = 0;
- ctx_tbl->ctx = cpu_to_be32(ctx);
-
- /* cfg the rss context table by command queue */
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
- &cmd_buf, &out_param);
-
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
-
- if (err || out_param != 0) {
- dev_err(&pdev->dev, "Failed to set rss context table, err: %d\n",
- err);
- return -EFAULT;
- }
-
- return 0;
-}
-
-int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
- struct hinic_rss_type *rss_type)
-{
- struct hinic_rss_context_table ctx_tbl = { 0 };
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- u16 out_size = sizeof(ctx_tbl);
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- int err;
-
- if (!hwdev || !rss_type)
- return -EINVAL;
-
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
-
- ctx_tbl.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- ctx_tbl.template_id = tmpl_idx;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL,
- &ctx_tbl, sizeof(ctx_tbl),
- &ctx_tbl, &out_size);
- if (err || !out_size || ctx_tbl.status) {
- dev_err(&pdev->dev, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n",
- err, ctx_tbl.status, out_size);
- return -EINVAL;
- }
-
- rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4);
- rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6);
- rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT);
- rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4);
- rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6);
- rss_type->tcp_ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context,
- TCP_IPV6_EXT);
- rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4);
- rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6);
-
- return 0;
-}
-
-int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id,
- const u8 *temp)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_rss_key rss_key = { 0 };
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(rss_key);
- int err;
-
- rss_key.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- rss_key.template_id = template_id;
- memcpy(rss_key.key, temp, HINIC_RSS_KEY_SIZE);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL,
- &rss_key, sizeof(rss_key),
- &rss_key, &out_size);
- if (err || !out_size || rss_key.status) {
- dev_err(&pdev->dev,
- "Failed to set rss hash key, err: %d, status: 0x%x, out size: 0x%x\n",
- err, rss_key.status, out_size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
- u8 *temp)
-{
- struct hinic_rss_template_key temp_key = { 0 };
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- u16 out_size = sizeof(temp_key);
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- int err;
-
- if (!hwdev || !temp)
- return -EINVAL;
-
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
-
- temp_key.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- temp_key.template_id = tmpl_idx;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,
- &temp_key, sizeof(temp_key),
- &temp_key, &out_size);
- if (err || !out_size || temp_key.status) {
- dev_err(&pdev->dev, "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x\n",
- err, temp_key.status, out_size);
- return -EINVAL;
- }
-
- memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE);
-
- return 0;
-}
-
-int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id,
- u8 type)
-{
- struct hinic_rss_engine_type rss_engine = { 0 };
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(rss_engine);
- int err;
-
- rss_engine.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- rss_engine.hash_engine = type;
- rss_engine.template_id = template_id;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,
- &rss_engine, sizeof(rss_engine),
- &rss_engine, &out_size);
- if (err || !out_size || rss_engine.status) {
- dev_err(&pdev->dev,
- "Failed to set hash engine, err: %d, status: 0x%x, out size: 0x%x\n",
- err, rss_engine.status, out_size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx, u8 *type)
-{
- struct hinic_rss_engine_type hash_type = { 0 };
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- u16 out_size = sizeof(hash_type);
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- int err;
-
- if (!hwdev || !type)
- return -EINVAL;
-
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
-
- hash_type.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- hash_type.template_id = tmpl_idx;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_HASH_ENGINE,
- &hash_type, sizeof(hash_type),
- &hash_type, &out_size);
- if (err || !out_size || hash_type.status) {
- dev_err(&pdev->dev, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x\n",
- err, hash_type.status, out_size);
- return -EINVAL;
- }
-
- *type = hash_type.hash_engine;
- return 0;
-}
-
-int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_rss_config rss_cfg = { 0 };
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size = sizeof(rss_cfg);
- int err;
-
- rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- rss_cfg.rss_en = rss_en;
- rss_cfg.template_id = template_id;
- rss_cfg.rq_priority_number = 0;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_CFG,
- &rss_cfg, sizeof(rss_cfg),
- &rss_cfg, &out_size);
- if (err || !out_size || rss_cfg.status) {
- dev_err(&pdev->dev,
- "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n",
- err, rss_cfg.status, out_size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx)
-{
- struct hinic_rss_template_mgmt template_mgmt = { 0 };
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- u16 out_size = sizeof(template_mgmt);
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
- &template_mgmt, sizeof(template_mgmt),
- &template_mgmt, &out_size);
- if (err || !out_size || template_mgmt.status) {
- dev_err(&pdev->dev, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\n",
- err, template_mgmt.status, out_size);
- return -EINVAL;
- }
-
- *tmpl_idx = template_mgmt.template_id;
-
- return 0;
-}
-
-int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx)
-{
- struct hinic_rss_template_mgmt template_mgmt = { 0 };
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- u16 out_size = sizeof(template_mgmt);
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- template_mgmt.template_id = tmpl_idx;
- template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
- &template_mgmt, sizeof(template_mgmt),
- &template_mgmt, &out_size);
- if (err || !out_size || template_mgmt.status) {
- dev_err(&pdev->dev, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\n",
- err, template_mgmt.status, out_size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int hinic_get_vport_stats(struct hinic_dev *nic_dev,
- struct hinic_vport_stats *stats)
-{
- struct hinic_cmd_vport_stats vport_stats = { 0 };
- struct hinic_port_stats_info stats_info = { 0 };
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- u16 out_size = sizeof(vport_stats);
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- stats_info.stats_version = HINIC_PORT_STATS_VERSION;
- stats_info.func_id = HINIC_HWIF_FUNC_IDX(hwif);
- stats_info.stats_size = sizeof(vport_stats);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT,
- &stats_info, sizeof(stats_info),
- &vport_stats, &out_size);
- if (err || !out_size || vport_stats.status) {
- dev_err(&pdev->dev,
- "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n",
- err, vport_stats.status, out_size);
- return -EFAULT;
- }
-
- memcpy(stats, &vport_stats.stats, sizeof(*stats));
- return 0;
-}
-
-int hinic_get_phy_port_stats(struct hinic_dev *nic_dev,
- struct hinic_phy_port_stats *stats)
-{
- struct hinic_port_stats_info stats_info = { 0 };
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_port_stats *port_stats;
- u16 out_size = sizeof(*port_stats);
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
- if (!port_stats)
- return -ENOMEM;
-
- stats_info.stats_version = HINIC_PORT_STATS_VERSION;
- stats_info.stats_size = sizeof(*port_stats);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS,
- &stats_info, sizeof(stats_info),
- port_stats, &out_size);
- if (err || !out_size || port_stats->status) {
- dev_err(&pdev->dev,
- "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n",
- err, port_stats->status, out_size);
- err = -EINVAL;
- goto out;
- }
-
- memcpy(stats, &port_stats->stats, sizeof(*stats));
-
-out:
- kfree(port_stats);
-
- return err;
-}
-
-int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_version_info up_ver = {0};
- u16 out_size = sizeof(up_ver);
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- int err;
-
- if (!hwdev)
- return -EINVAL;
-
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,
- &up_ver, sizeof(up_ver), &up_ver,
- &out_size);
- if (err || !out_size || up_ver.status) {
- dev_err(&pdev->dev,
- "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n",
- err, up_ver.status, out_size);
- return -EINVAL;
- }
-
- snprintf(mgmt_ver, HINIC_MGMT_VERSION_MAX_LEN, "%s", up_ver.ver);
-
- return 0;
-}
-
-int hinic_get_link_mode(struct hinic_hwdev *hwdev,
- struct hinic_link_mode_cmd *link_mode)
-{
- u16 out_size;
- int err;
-
- if (!hwdev || !link_mode)
- return -EINVAL;
-
- link_mode->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- out_size = sizeof(*link_mode);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
- link_mode, sizeof(*link_mode),
- link_mode, &out_size);
- if (err || !out_size || link_mode->status) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n",
- err, link_mode->status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable)
-{
- struct hinic_set_autoneg_cmd autoneg = {0};
- u16 out_size = sizeof(autoneg);
- int err;
-
- if (!hwdev)
- return -EINVAL;
-
- autoneg.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- autoneg.enable = enable;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_AUTONEG,
- &autoneg, sizeof(autoneg),
- &autoneg, &out_size);
- if (err || !out_size || autoneg.status) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n",
- enable ? "enable" : "disable", err, autoneg.status,
- out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed)
-{
- struct hinic_speed_cmd speed_info = {0};
- u16 out_size = sizeof(speed_info);
- int err;
-
- if (!hwdev)
- return -EINVAL;
-
- speed_info.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- speed_info.speed = speed;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_SPEED,
- &speed_info, sizeof(speed_info),
- &speed_info, &out_size);
- if (err || !out_size || speed_info.status) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n",
- err, speed_info.status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-int hinic_set_link_settings(struct hinic_hwdev *hwdev,
- struct hinic_link_ksettings_info *info)
-{
- u16 out_size = sizeof(*info);
- int err;
-
- err = hinic_hilink_msg_cmd(hwdev, HINIC_HILINK_CMD_SET_LINK_SETTINGS,
- info, sizeof(*info), info, &out_size);
- if ((info->status != HINIC_MGMT_CMD_UNSUPPORTED &&
- info->status) || err || !out_size) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n",
- err, info->status, out_size);
- return -EFAULT;
- }
-
- return info->status;
-}
-
-int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
- struct hinic_pause_config *pause_info)
-{
- u16 out_size = sizeof(*pause_info);
- int err;
-
- pause_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO,
- pause_info, sizeof(*pause_info),
- pause_info, &out_size);
- if (err || !out_size || pause_info->status) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n",
- err, pause_info->status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
- struct hinic_pause_config *pause_info)
-{
- u16 out_size = sizeof(*pause_info);
- int err;
-
- pause_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
- pause_info, sizeof(*pause_info),
- pause_info, &out_size);
- if (err || !out_size || pause_info->status) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n",
- err, pause_info->status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap)
-{
- struct hinic_nic_cfg *nic_cfg = &hwdev->func_to_io.nic_cfg;
- struct hinic_set_pfc pfc = {0};
- u16 out_size = sizeof(pfc);
- int err;
-
- if (HINIC_IS_VF(hwdev->hwif))
- return 0;
-
- mutex_lock(&nic_cfg->cfg_mutex);
-
- pfc.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- pfc.pfc_bitmap = pfc_bitmap;
- pfc.pfc_en = pfc_en;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PFC,
- &pfc, sizeof(pfc), &pfc, &out_size);
- if (err || pfc.status || !out_size) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to %s pfc, err: %d, status: 0x%x, out size: 0x%x\n",
- pfc_en ? "enable" : "disable", err, pfc.status,
- out_size);
- mutex_unlock(&nic_cfg->cfg_mutex);
- return -EIO;
- }
-
- /* pause settings is opposite from pfc */
- nic_cfg->rx_pause = pfc_en ? 0 : 1;
- nic_cfg->tx_pause = pfc_en ? 0 : 1;
-
- mutex_unlock(&nic_cfg->cfg_mutex);
-
- return 0;
-}
-
-int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable)
-{
- struct hinic_port_loopback lb = {0};
- u16 out_size = sizeof(lb);
- int err;
-
- lb.mode = mode;
- lb.en = enable;
-
- if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Invalid loopback mode %d to set\n", mode);
- return -EINVAL;
- }
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LOOPBACK_MODE,
- &lb, sizeof(lb), &lb, &out_size);
- if (err || !out_size || lb.status) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to set loopback mode %d en %d, err: %d, status: 0x%x, out size: 0x%x\n",
- mode, enable, err, lb.status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-static int _set_led_status(struct hinic_hwdev *hwdev, u8 port,
- enum hinic_led_type type,
- enum hinic_led_mode mode, u8 reset)
-{
- struct hinic_led_info led_info = {0};
- u16 out_size = sizeof(led_info);
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- led_info.port = port;
- led_info.reset = reset;
-
- led_info.type = type;
- led_info.mode = mode;
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_SET_LED_STATUS,
- &led_info, sizeof(led_info),
- &led_info, &out_size, HINIC_MGMT_MSG_SYNC);
- if (err || led_info.status || !out_size) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n",
- err, led_info.status, out_size);
- return -EIO;
- }
-
- return 0;
-}
-
-int hinic_set_led_status(struct hinic_hwdev *hwdev, u8 port,
- enum hinic_led_type type, enum hinic_led_mode mode)
-{
- if (!hwdev)
- return -EINVAL;
-
- return _set_led_status(hwdev, port, type, mode, 0);
-}
-
-int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port)
-{
- int err;
-
- if (!hwdev)
- return -EINVAL;
-
- err = _set_led_status(hwdev, port, HINIC_LED_TYPE_INVALID,
- HINIC_LED_MODE_INVALID, 1);
- if (err)
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to reset led status\n");
-
- return err;
-}
-
-static bool hinic_if_sfp_absent(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_get_light_module_abs sfp_abs = {0};
- u16 out_size = sizeof(sfp_abs);
- u8 port_id = hwdev->port_id;
- int err;
-
- sfp_abs.port_id = port_id;
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_SFP_ABS,
- &sfp_abs, sizeof(sfp_abs), &sfp_abs,
- &out_size);
- if (sfp_abs.status || err || !out_size) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to get port%d sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n",
- port_id, err, sfp_abs.status, out_size);
- return true;
- }
-
- return ((sfp_abs.abs_status == 0) ? false : true);
-}
-
-int hinic_get_sfp_eeprom(struct hinic_hwdev *hwdev, u8 *data, u16 *len)
-{
- struct hinic_cmd_get_std_sfp_info sfp_info = {0};
- u16 out_size = sizeof(sfp_info);
- u8 port_id;
- int err;
-
- if (!hwdev || !data || !len)
- return -EINVAL;
-
- port_id = hwdev->port_id;
-
- if (hinic_if_sfp_absent(hwdev))
- return -ENXIO;
-
- sfp_info.port_id = port_id;
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_STD_SFP_INFO,
- &sfp_info, sizeof(sfp_info), &sfp_info,
- &out_size);
- if (sfp_info.status || err || !out_size) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to get port%d sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n",
- port_id, err, sfp_info.status, out_size);
- return -EIO;
- }
-
- *len = min_t(u16, sfp_info.eeprom_len, STD_SFP_INFO_MAX_SIZE);
- memcpy(data, sfp_info.sfp_info, STD_SFP_INFO_MAX_SIZE);
-
- return 0;
-}
-
-int hinic_get_sfp_type(struct hinic_hwdev *hwdev, u8 *data0, u8 *data1)
-{
- u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
- u16 len;
- int err;
-
- if (hinic_if_sfp_absent(hwdev))
- return -ENXIO;
-
- err = hinic_get_sfp_eeprom(hwdev, sfp_data, &len);
- if (err)
- return err;
-
- *data0 = sfp_data[0];
- *data1 = sfp_data[1];
-
- return 0;
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
deleted file mode 100644
index c8694ac7c702..000000000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ /dev/null
@@ -1,900 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- */
-
-#ifndef HINIC_PORT_H
-#define HINIC_PORT_H
-
-#include <linux/types.h>
-#include <linux/ethtool.h>
-#include <linux/etherdevice.h>
-#include <linux/bitops.h>
-
-#include "hinic_dev.h"
-
-#define HINIC_RSS_KEY_SIZE 40
-#define HINIC_RSS_INDIR_SIZE 256
-#define HINIC_PORT_STATS_VERSION 0
-#define HINIC_FW_VERSION_NAME 16
-#define HINIC_COMPILE_TIME_LEN 20
-#define HINIC_MGMT_VERSION_MAX_LEN 32
-
-struct hinic_version_info {
- u8 status;
- u8 version;
- u8 rsvd[6];
-
- u8 ver[HINIC_FW_VERSION_NAME];
- u8 time[HINIC_COMPILE_TIME_LEN];
-};
-
-enum hinic_rx_mode {
- HINIC_RX_MODE_UC = BIT(0),
- HINIC_RX_MODE_MC = BIT(1),
- HINIC_RX_MODE_BC = BIT(2),
- HINIC_RX_MODE_MC_ALL = BIT(3),
- HINIC_RX_MODE_PROMISC = BIT(4),
-};
-
-enum hinic_port_link_state {
- HINIC_LINK_STATE_DOWN,
- HINIC_LINK_STATE_UP,
-};
-
-enum hinic_port_state {
- HINIC_PORT_DISABLE = 0,
- HINIC_PORT_ENABLE = 3,
-};
-
-enum hinic_func_port_state {
- HINIC_FUNC_PORT_DISABLE = 0,
- HINIC_FUNC_PORT_ENABLE = 2,
-};
-
-enum hinic_autoneg_cap {
- HINIC_AUTONEG_UNSUPPORTED,
- HINIC_AUTONEG_SUPPORTED,
-};
-
-enum hinic_autoneg_state {
- HINIC_AUTONEG_DISABLED,
- HINIC_AUTONEG_ACTIVE,
-};
-
-enum hinic_duplex {
- HINIC_DUPLEX_HALF,
- HINIC_DUPLEX_FULL,
-};
-
-enum hinic_speed {
- HINIC_SPEED_10MB_LINK = 0,
- HINIC_SPEED_100MB_LINK,
- HINIC_SPEED_1000MB_LINK,
- HINIC_SPEED_10GB_LINK,
- HINIC_SPEED_25GB_LINK,
- HINIC_SPEED_40GB_LINK,
- HINIC_SPEED_100GB_LINK,
-
- HINIC_SPEED_UNKNOWN = 0xFF,
-};
-
-enum hinic_link_mode {
- HINIC_10GE_BASE_KR = 0,
- HINIC_40GE_BASE_KR4 = 1,
- HINIC_40GE_BASE_CR4 = 2,
- HINIC_100GE_BASE_KR4 = 3,
- HINIC_100GE_BASE_CR4 = 4,
- HINIC_25GE_BASE_KR_S = 5,
- HINIC_25GE_BASE_CR_S = 6,
- HINIC_25GE_BASE_KR = 7,
- HINIC_25GE_BASE_CR = 8,
- HINIC_GE_BASE_KX = 9,
- HINIC_LINK_MODE_NUMBERS,
-
- HINIC_SUPPORTED_UNKNOWN = 0xFFFF,
-};
-
-enum hinic_port_type {
- HINIC_PORT_TP, /* BASET */
- HINIC_PORT_AUI,
- HINIC_PORT_MII,
- HINIC_PORT_FIBRE, /* OPTICAL */
- HINIC_PORT_BNC,
- HINIC_PORT_ELEC,
- HINIC_PORT_COPPER, /* PORT_DA */
- HINIC_PORT_AOC,
- HINIC_PORT_BACKPLANE,
- HINIC_PORT_NONE = 0xEF,
- HINIC_PORT_OTHER = 0xFF,
-};
-
-enum hinic_valid_link_settings {
- HILINK_LINK_SET_SPEED = 0x1,
- HILINK_LINK_SET_AUTONEG = 0x2,
- HILINK_LINK_SET_FEC = 0x4,
-};
-
-enum hinic_tso_state {
- HINIC_TSO_DISABLE = 0,
- HINIC_TSO_ENABLE = 1,
-};
-
-struct hinic_port_mac_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 vlan_id;
- u16 rsvd1;
- unsigned char mac[ETH_ALEN];
-};
-
-struct hinic_port_mtu_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u32 mtu;
-};
-
-struct hinic_port_vlan_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 vlan_id;
-};
-
-struct hinic_port_rx_mode_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd;
- u32 rx_mode;
-};
-
-struct hinic_port_link_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 state;
- u8 rsvd1;
-};
-
-struct hinic_port_state_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 state;
- u8 rsvd1[3];
-};
-
-struct hinic_port_link_status {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 link;
- u8 port_id;
-};
-
-struct hinic_cable_plug_event {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 plugged; /* 0: unplugged, 1: plugged */
- u8 port_id;
-};
-
-enum link_err_type {
- LINK_ERR_MODULE_UNRECOGENIZED,
- LINK_ERR_NUM,
-};
-
-struct hinic_link_err_event {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 err_type;
- u8 port_id;
-};
-
-struct hinic_port_func_state_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u8 state;
- u8 rsvd2[3];
-};
-
-struct hinic_port_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u8 port_type;
- u8 autoneg_cap;
- u8 autoneg_state;
- u8 duplex;
- u8 speed;
- u8 rsvd2[3];
-};
-
-struct hinic_link_mode_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 rsvd1;
- u16 supported; /* 0xFFFF represents invalid value */
- u16 advertised;
-};
-
-struct hinic_speed_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 speed;
-};
-
-struct hinic_set_autoneg_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 enable; /* 1: enable , 0: disable */
-};
-
-struct hinic_link_ksettings_info {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 rsvd1;
-
- u32 valid_bitmap;
- u32 speed; /* enum nic_speed_level */
- u8 autoneg; /* 0 - off; 1 - on */
- u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
- u8 rsvd2[18]; /* reserved for duplex, port, etc. */
-};
-
-struct hinic_tso_config {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 rsvd1;
- u8 tso_en;
- u8 resv2[3];
-};
-
-struct hinic_checksum_offload {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 rsvd1;
- u32 rx_csum_offload;
-};
-
-struct hinic_rq_num {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 rsvd1[33];
- u32 num_rqs;
- u32 rq_depth;
-};
-
-struct hinic_lro_config {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 rsvd1;
- u8 lro_ipv4_en;
- u8 lro_ipv6_en;
- u8 lro_max_wqe_num;
- u8 resv2[13];
-};
-
-struct hinic_lro_timer {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 type; /* 0: set timer value, 1: get timer value */
- u8 enable; /* when set lro time, enable should be 1 */
- u16 rsvd1;
- u32 timer;
-};
-
-struct hinic_vlan_cfg {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 vlan_rx_offload;
- u8 rsvd1[5];
-};
-
-struct hinic_vlan_filter {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 rsvd1[2];
- u32 enable;
-};
-
-struct hinic_rss_template_mgmt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 cmd;
- u8 template_id;
- u8 rsvd1[4];
-};
-
-struct hinic_rss_template_key {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 template_id;
- u8 rsvd1;
- u8 key[HINIC_RSS_KEY_SIZE];
-};
-
-struct hinic_rss_context_tbl {
- u32 group_index;
- u32 offset;
- u32 size;
- u32 rsvd;
- u32 ctx;
-};
-
-struct hinic_rss_context_table {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 template_id;
- u8 rsvd1;
- u32 context;
-};
-
-struct hinic_rss_indirect_tbl {
- u32 group_index;
- u32 offset;
- u32 size;
- u32 rsvd;
- u8 entry[HINIC_RSS_INDIR_SIZE];
-};
-
-struct hinic_rss_indir_table {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 template_id;
- u8 rsvd1;
- u8 indir[HINIC_RSS_INDIR_SIZE];
-};
-
-struct hinic_rss_key {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 template_id;
- u8 rsvd1;
- u8 key[HINIC_RSS_KEY_SIZE];
-};
-
-struct hinic_rss_engine_type {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 template_id;
- u8 hash_engine;
- u8 rsvd1[4];
-};
-
-struct hinic_rss_config {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 rss_en;
- u8 template_id;
- u8 rq_priority_number;
- u8 rsvd1[11];
-};
-
-struct hinic_stats {
- char name[ETH_GSTRING_LEN];
- u32 size;
- int offset;
-};
-
-struct hinic_vport_stats {
- u64 tx_unicast_pkts_vport;
- u64 tx_unicast_bytes_vport;
- u64 tx_multicast_pkts_vport;
- u64 tx_multicast_bytes_vport;
- u64 tx_broadcast_pkts_vport;
- u64 tx_broadcast_bytes_vport;
-
- u64 rx_unicast_pkts_vport;
- u64 rx_unicast_bytes_vport;
- u64 rx_multicast_pkts_vport;
- u64 rx_multicast_bytes_vport;
- u64 rx_broadcast_pkts_vport;
- u64 rx_broadcast_bytes_vport;
-
- u64 tx_discard_vport;
- u64 rx_discard_vport;
- u64 tx_err_vport;
- u64 rx_err_vport;
-};
-
-struct hinic_phy_port_stats {
- u64 mac_rx_total_pkt_num;
- u64 mac_rx_total_oct_num;
- u64 mac_rx_bad_pkt_num;
- u64 mac_rx_bad_oct_num;
- u64 mac_rx_good_pkt_num;
- u64 mac_rx_good_oct_num;
- u64 mac_rx_uni_pkt_num;
- u64 mac_rx_multi_pkt_num;
- u64 mac_rx_broad_pkt_num;
-
- u64 mac_tx_total_pkt_num;
- u64 mac_tx_total_oct_num;
- u64 mac_tx_bad_pkt_num;
- u64 mac_tx_bad_oct_num;
- u64 mac_tx_good_pkt_num;
- u64 mac_tx_good_oct_num;
- u64 mac_tx_uni_pkt_num;
- u64 mac_tx_multi_pkt_num;
- u64 mac_tx_broad_pkt_num;
-
- u64 mac_rx_fragment_pkt_num;
- u64 mac_rx_undersize_pkt_num;
- u64 mac_rx_undermin_pkt_num;
- u64 mac_rx_64_oct_pkt_num;
- u64 mac_rx_65_127_oct_pkt_num;
- u64 mac_rx_128_255_oct_pkt_num;
- u64 mac_rx_256_511_oct_pkt_num;
- u64 mac_rx_512_1023_oct_pkt_num;
- u64 mac_rx_1024_1518_oct_pkt_num;
- u64 mac_rx_1519_2047_oct_pkt_num;
- u64 mac_rx_2048_4095_oct_pkt_num;
- u64 mac_rx_4096_8191_oct_pkt_num;
- u64 mac_rx_8192_9216_oct_pkt_num;
- u64 mac_rx_9217_12287_oct_pkt_num;
- u64 mac_rx_12288_16383_oct_pkt_num;
- u64 mac_rx_1519_max_bad_pkt_num;
- u64 mac_rx_1519_max_good_pkt_num;
- u64 mac_rx_oversize_pkt_num;
- u64 mac_rx_jabber_pkt_num;
-
- u64 mac_rx_pause_num;
- u64 mac_rx_pfc_pkt_num;
- u64 mac_rx_pfc_pri0_pkt_num;
- u64 mac_rx_pfc_pri1_pkt_num;
- u64 mac_rx_pfc_pri2_pkt_num;
- u64 mac_rx_pfc_pri3_pkt_num;
- u64 mac_rx_pfc_pri4_pkt_num;
- u64 mac_rx_pfc_pri5_pkt_num;
- u64 mac_rx_pfc_pri6_pkt_num;
- u64 mac_rx_pfc_pri7_pkt_num;
- u64 mac_rx_control_pkt_num;
- u64 mac_rx_y1731_pkt_num;
- u64 mac_rx_sym_err_pkt_num;
- u64 mac_rx_fcs_err_pkt_num;
- u64 mac_rx_send_app_good_pkt_num;
- u64 mac_rx_send_app_bad_pkt_num;
-
- u64 mac_tx_fragment_pkt_num;
- u64 mac_tx_undersize_pkt_num;
- u64 mac_tx_undermin_pkt_num;
- u64 mac_tx_64_oct_pkt_num;
- u64 mac_tx_65_127_oct_pkt_num;
- u64 mac_tx_128_255_oct_pkt_num;
- u64 mac_tx_256_511_oct_pkt_num;
- u64 mac_tx_512_1023_oct_pkt_num;
- u64 mac_tx_1024_1518_oct_pkt_num;
- u64 mac_tx_1519_2047_oct_pkt_num;
- u64 mac_tx_2048_4095_oct_pkt_num;
- u64 mac_tx_4096_8191_oct_pkt_num;
- u64 mac_tx_8192_9216_oct_pkt_num;
- u64 mac_tx_9217_12287_oct_pkt_num;
- u64 mac_tx_12288_16383_oct_pkt_num;
- u64 mac_tx_1519_max_bad_pkt_num;
- u64 mac_tx_1519_max_good_pkt_num;
- u64 mac_tx_oversize_pkt_num;
- u64 mac_tx_jabber_pkt_num;
-
- u64 mac_tx_pause_num;
- u64 mac_tx_pfc_pkt_num;
- u64 mac_tx_pfc_pri0_pkt_num;
- u64 mac_tx_pfc_pri1_pkt_num;
- u64 mac_tx_pfc_pri2_pkt_num;
- u64 mac_tx_pfc_pri3_pkt_num;
- u64 mac_tx_pfc_pri4_pkt_num;
- u64 mac_tx_pfc_pri5_pkt_num;
- u64 mac_tx_pfc_pri6_pkt_num;
- u64 mac_tx_pfc_pri7_pkt_num;
- u64 mac_tx_control_pkt_num;
- u64 mac_tx_y1731_pkt_num;
- u64 mac_tx_1588_pkt_num;
- u64 mac_tx_err_all_pkt_num;
- u64 mac_tx_from_app_good_pkt_num;
- u64 mac_tx_from_app_bad_pkt_num;
-
- u64 mac_rx_higig2_ext_pkt_num;
- u64 mac_rx_higig2_message_pkt_num;
- u64 mac_rx_higig2_error_pkt_num;
- u64 mac_rx_higig2_cpu_ctrl_pkt_num;
- u64 mac_rx_higig2_unicast_pkt_num;
- u64 mac_rx_higig2_broadcast_pkt_num;
- u64 mac_rx_higig2_l2_multicast_pkt_num;
- u64 mac_rx_higig2_l3_multicast_pkt_num;
-
- u64 mac_tx_higig2_message_pkt_num;
- u64 mac_tx_higig2_ext_pkt_num;
- u64 mac_tx_higig2_cpu_ctrl_pkt_num;
- u64 mac_tx_higig2_unicast_pkt_num;
- u64 mac_tx_higig2_broadcast_pkt_num;
- u64 mac_tx_higig2_l2_multicast_pkt_num;
- u64 mac_tx_higig2_l3_multicast_pkt_num;
-};
-
-struct hinic_port_stats_info {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 rsvd1;
- u32 stats_version;
- u32 stats_size;
-};
-
-struct hinic_port_stats {
- u8 status;
- u8 version;
- u8 rsvd[6];
-
- struct hinic_phy_port_stats stats;
-};
-
-struct hinic_cmd_vport_stats {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- struct hinic_vport_stats stats;
-};
-
-struct hinic_tx_rate_cfg_max_min {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 rsvd1;
- u32 min_rate;
- u32 max_rate;
- u8 rsvd2[8];
-};
-
-struct hinic_tx_rate_cfg {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 rsvd1;
- u32 tx_rate;
-};
-
-enum nic_speed_level {
- LINK_SPEED_10MB = 0,
- LINK_SPEED_100MB,
- LINK_SPEED_1GB,
- LINK_SPEED_10GB,
- LINK_SPEED_25GB,
- LINK_SPEED_40GB,
- LINK_SPEED_100GB,
- LINK_SPEED_LEVELS,
-};
-
-struct hinic_spoofchk_set {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 state;
- u8 rsvd1;
- u16 func_id;
-};
-
-struct hinic_pause_config {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 rsvd1;
- u32 auto_neg;
- u32 rx_pause;
- u32 tx_pause;
-};
-
-struct hinic_set_pfc {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u8 pfc_en;
- u8 pfc_bitmap;
- u8 rsvd1[4];
-};
-
-/* get or set loopback mode, need to modify by base API */
-#define HINIC_INTERNAL_LP_MODE 5
-#define LOOP_MODE_MIN 1
-#define LOOP_MODE_MAX 6
-
-struct hinic_port_loopback {
- u8 status;
- u8 version;
- u8 rsvd[6];
-
- u32 mode;
- u32 en;
-};
-
-struct hinic_led_info {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 port;
- u8 type;
- u8 mode;
- u8 reset;
-};
-
-#define STD_SFP_INFO_MAX_SIZE 640
-
-struct hinic_cmd_get_light_module_abs {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 port_id;
- u8 abs_status; /* 0:present, 1:absent */
- u8 rsv[2];
-};
-
-#define STD_SFP_INFO_MAX_SIZE 640
-
-struct hinic_cmd_get_std_sfp_info {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 port_id;
- u8 wire_type;
- u16 eeprom_len;
- u32 rsvd;
- u8 sfp_info[STD_SFP_INFO_MAX_SIZE];
-};
-
-struct hinic_cmd_update_fw {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- struct {
- u32 SL:1;
- u32 SF:1;
- u32 flag:1;
- u32 reserved:13;
- u32 fragment_len:16;
- } ctl_info;
-
- struct {
- u32 FW_section_CRC;
- u32 FW_section_type;
- } section_info;
-
- u32 total_len;
- u32 setion_total_len;
- u32 fw_section_version;
- u32 section_offset;
- u32 data[384];
-};
-
-int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id);
-
-int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id);
-
-int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr);
-
-int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu);
-
-int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id);
-
-int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id);
-
-int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode);
-
-int hinic_port_link_state(struct hinic_dev *nic_dev,
- enum hinic_port_link_state *link_state);
-
-int hinic_port_set_state(struct hinic_dev *nic_dev,
- enum hinic_port_state state);
-
-int hinic_port_set_func_state(struct hinic_dev *nic_dev,
- enum hinic_func_port_state state);
-
-int hinic_port_get_cap(struct hinic_dev *nic_dev,
- struct hinic_port_cap *port_cap);
-
-int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs);
-
-int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
-
-int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en);
-
-int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
- u32 lro_timer, u32 wqe_num);
-
-int hinic_set_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
- struct hinic_rss_type rss_type);
-
-int hinic_rss_set_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
- const u32 *indir_table);
-
-int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id,
- const u8 *temp);
-
-int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id,
- u8 type);
-
-int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id);
-
-int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx);
-
-int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx);
-
-void hinic_set_ethtool_ops(struct net_device *netdev);
-
-int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
- struct hinic_rss_type *rss_type);
-
-int hinic_rss_get_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
- u32 *indir_table);
-
-int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
- u8 *temp);
-
-int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx,
- u8 *type);
-
-int hinic_get_phy_port_stats(struct hinic_dev *nic_dev,
- struct hinic_phy_port_stats *stats);
-
-int hinic_get_vport_stats(struct hinic_dev *nic_dev,
- struct hinic_vport_stats *stats);
-
-int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en);
-
-int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en);
-
-int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver);
-
-int hinic_set_link_settings(struct hinic_hwdev *hwdev,
- struct hinic_link_ksettings_info *info);
-
-int hinic_get_link_mode(struct hinic_hwdev *hwdev,
- struct hinic_link_mode_cmd *link_mode);
-
-int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable);
-
-int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed);
-
-int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
- struct hinic_pause_config *pause_info);
-
-int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
- struct hinic_pause_config *pause_info);
-
-int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap);
-
-int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable);
-
-enum hinic_led_mode {
- HINIC_LED_MODE_ON,
- HINIC_LED_MODE_OFF,
- HINIC_LED_MODE_FORCE_1HZ,
- HINIC_LED_MODE_FORCE_2HZ,
- HINIC_LED_MODE_FORCE_4HZ,
- HINIC_LED_MODE_1HZ,
- HINIC_LED_MODE_2HZ,
- HINIC_LED_MODE_4HZ,
- HINIC_LED_MODE_INVALID,
-};
-
-enum hinic_led_type {
- HINIC_LED_TYPE_LINK,
- HINIC_LED_TYPE_LOW_SPEED,
- HINIC_LED_TYPE_HIGH_SPEED,
- HINIC_LED_TYPE_INVALID,
-};
-
-int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port);
-
-int hinic_set_led_status(struct hinic_hwdev *hwdev, u8 port,
- enum hinic_led_type type, enum hinic_led_mode mode);
-
-int hinic_get_sfp_type(struct hinic_hwdev *hwdev, u8 *data0, u8 *data1);
-
-int hinic_get_sfp_eeprom(struct hinic_hwdev *hwdev, u8 *data, u16 *len);
-
-int hinic_open(struct net_device *netdev);
-
-int hinic_close(struct net_device *netdev);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h
new file mode 100644
index 000000000000..518c815e0e77
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h
@@ -0,0 +1,541 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __HINIC_PORT_CMD_H__
+#define __HINIC_PORT_CMD_H__
+
+/* cmd of mgmt CPU message for NIC module */
+enum hinic_port_cmd {
+ HINIC_PORT_CMD_VF_REGISTER = 0x0,
+ /* not defined in base line, only for PFD and VFD */
+ HINIC_PORT_CMD_VF_UNREGISTER = 0x1,
+ /* not defined in base line, only for PFD and VFD */
+
+ HINIC_PORT_CMD_CHANGE_MTU = 0x2,
+
+ HINIC_PORT_CMD_ADD_VLAN = 0x3,
+ HINIC_PORT_CMD_DEL_VLAN,
+
+ HINIC_PORT_CMD_SET_PFC = 0x5,
+ HINIC_PORT_CMD_GET_PFC,
+ HINIC_PORT_CMD_SET_ETS,
+ HINIC_PORT_CMD_GET_ETS,
+
+ HINIC_PORT_CMD_SET_MAC = 0x9,
+ HINIC_PORT_CMD_GET_MAC,
+ HINIC_PORT_CMD_DEL_MAC,
+
+ HINIC_PORT_CMD_SET_RX_MODE = 0xc,
+ HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE = 0xd,
+
+ HINIC_PORT_CMD_GET_AUTONEG_CAP = 0xf,
+ /* not defined in base line */
+ HINIC_PORT_CMD_GET_AUTONET_STATE,
+ /* not defined in base line */
+ HINIC_PORT_CMD_GET_SPEED,
+ /* not defined in base line */
+ HINIC_PORT_CMD_GET_DUPLEX,
+ /* not defined in base line */
+ HINIC_PORT_CMD_GET_MEDIA_TYPE,
+ /* not defined in base line */
+
+ HINIC_PORT_CMD_GET_PAUSE_INFO = 0x14,
+ HINIC_PORT_CMD_SET_PAUSE_INFO,
+
+ HINIC_PORT_CMD_GET_LINK_STATE = 0x18,
+ HINIC_PORT_CMD_SET_LRO = 0x19,
+ HINIC_PORT_CMD_SET_RX_CSUM = 0x1a,
+ HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 0x1b,
+
+ HINIC_PORT_CMD_GET_PORT_STATISTICS = 0x1c,
+ HINIC_PORT_CMD_CLEAR_PORT_STATISTICS,
+ HINIC_PORT_CMD_GET_VPORT_STAT,
+ HINIC_PORT_CMD_CLEAN_VPORT_STAT,
+
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 0x25,
+ HINIC_PORT_CMD_SET_RSS_TEMPLATE_INDIR_TBL,
+
+ HINIC_PORT_CMD_SET_PORT_ENABLE = 0x29,
+ HINIC_PORT_CMD_GET_PORT_ENABLE,
+
+ HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 0x2b,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,
+ HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,
+ HINIC_PORT_CMD_GET_RSS_HASH_ENGINE,
+ HINIC_PORT_CMD_GET_RSS_CTX_TBL,
+ HINIC_PORT_CMD_SET_RSS_CTX_TBL,
+ HINIC_PORT_CMD_RSS_TEMP_MGR,
+
+ /* 0x36 ~ 0x40 have defined in base line */
+
+ HINIC_PORT_CMD_RSS_CFG = 0x42,
+
+ HINIC_PORT_CMD_GET_PHY_TYPE = 0x44,
+ HINIC_PORT_CMD_INIT_FUNC = 0x45,
+ HINIC_PORT_CMD_SET_LLI_PRI = 0x46,
+
+ HINIC_PORT_CMD_GET_LOOPBACK_MODE = 0x48,
+ HINIC_PORT_CMD_SET_LOOPBACK_MODE,
+
+ HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4a,
+ HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE,
+
+ /* 0x4c ~ 0x57 have defined in base line */
+ HINIC_PORT_CMD_DISABLE_PROMISC = 0x4c,
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK = 0x4e,
+ HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58,
+ HINIC_PORT_CMD_GET_BOOT_VERSION,
+ HINIC_PORT_CMD_GET_MICROCODE_VERSION,
+
+ HINIC_PORT_CMD_GET_PORT_TYPE = 0x5b,
+ /* not defined in base line */
+
+ HINIC_PORT_CMD_GET_VPORT_ENABLE = 0x5c,
+ HINIC_PORT_CMD_SET_VPORT_ENABLE,
+
+ HINIC_PORT_CMD_GET_PORT_ID_BY_FUNC_ID = 0x5e,
+
+ HINIC_PORT_CMD_SET_LED_TEST = 0x5f,
+
+ HINIC_PORT_CMD_SET_LLI_STATE = 0x60,
+ HINIC_PORT_CMD_SET_LLI_TYPE,
+ HINIC_PORT_CMD_GET_LLI_CFG,
+
+ HINIC_PORT_CMD_GET_LRO = 0x63,
+
+ HINIC_PORT_CMD_GET_DMA_CS = 0x64,
+ HINIC_PORT_CMD_SET_DMA_CS,
+
+ HINIC_PORT_CMD_GET_GLOBAL_QPN = 0x66,
+
+ HINIC_PORT_CMD_SET_PFC_MISC = 0x67,
+ HINIC_PORT_CMD_GET_PFC_MISC,
+
+ HINIC_PORT_CMD_SET_VF_RATE = 0x69,
+ HINIC_PORT_CMD_SET_VF_VLAN,
+ HINIC_PORT_CMD_CLR_VF_VLAN,
+
+ /* 0x6c,0x6e have defined in base line */
+ HINIC_PORT_CMD_SET_UCAPTURE_OPT = 0x6F,
+
+ HINIC_PORT_CMD_SET_TSO = 0x70,
+ HINIC_PORT_CMD_SET_PHY_POWER = 0x71,
+ HINIC_PORT_CMD_UPDATE_FW = 0x72,
+ HINIC_PORT_CMD_SET_RQ_IQ_MAP = 0x73,
+ /* not defined in base line */
+ HINIC_PORT_CMD_SET_PFC_THD = 0x75,
+ /* not defined in base line */
+ HINIC_PORT_CMD_SET_PORT_LINK_STATUS = 0x76,
+ HINIC_PORT_CMD_SET_CGE_PAUSE_TIME_CFG = 0x77,
+
+ HINIC_PORT_CMD_GET_FW_SUPPORT_FLAG = 0x79,
+
+ HINIC_PORT_CMD_SET_PORT_REPORT = 0x7B,
+
+ HINIC_PORT_CMD_LINK_STATUS_REPORT = 0xa0,
+
+ HINIC_PORT_CMD_SET_LOSSLESS_ETH = 0xa3,
+ HINIC_PORT_CMD_UPDATE_MAC = 0xa4,
+
+ HINIC_PORT_CMD_GET_UART_LOG = 0xa5,
+ HINIC_PORT_CMD_SET_UART_LOG,
+
+ HINIC_PORT_CMD_GET_PORT_INFO = 0xaa,
+
+ HINIC_MISC_SET_FUNC_SF_ENBITS = 0xab,
+ /* not defined in base line */
+ HINIC_MISC_GET_FUNC_SF_ENBITS,
+ /* not defined in base line */
+
+ HINIC_PORT_CMD_GET_SFP_INFO = 0xad,
+
+ HINIC_PORT_CMD_SET_NETQ = 0xc1,
+ HINIC_PORT_CMD_ADD_RQ_FILTER = 0xc2,
+ HINIC_PORT_CMD_DEL_RQ_FILTER = 0xc3,
+
+ HINIC_PORT_CMD_GET_FW_LOG = 0xca,
+ HINIC_PORT_CMD_SET_IPSU_MAC = 0xcb,
+ HINIC_PORT_CMD_GET_IPSU_MAC = 0xcc,
+
+ HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4,
+
+ HINIC_PORT_CMD_SET_IQ_ENABLE = 0xd6,
+
+ HINIC_PORT_CMD_GET_LINK_MODE = 0xD9,
+ HINIC_PORT_CMD_SET_SPEED = 0xDA,
+ HINIC_PORT_CMD_SET_AUTONEG = 0xDB,
+
+ HINIC_PORT_CMD_CLEAR_SQ_RES = 0xDD,
+ HINIC_PORT_CMD_SET_SUPER_CQE = 0xDE,
+ HINIC_PORT_CMD_SET_VF_COS = 0xDF,
+ HINIC_PORT_CMD_GET_VF_COS = 0xE1,
+
+ HINIC_PORT_CMD_CABLE_PLUG_EVENT = 0xE5,
+ HINIC_PORT_CMD_LINK_ERR_EVENT = 0xE6,
+
+ HINIC_PORT_CMD_SET_PORT_FUNCS_STATE = 0xE7,
+ HINIC_PORT_CMD_SET_COS_UP_MAP = 0xE8,
+
+ HINIC_PORT_CMD_RESET_LINK_CFG = 0xEB,
+ HINIC_PORT_CMD_GET_STD_SFP_INFO = 0xF0,
+
+ HINIC_PORT_CMD_FORCE_PKT_DROP = 0xF3,
+ HINIC_PORT_CMD_SET_LRO_TIMER = 0xF4,
+
+ HINIC_PORT_CMD_SET_VHD_CFG = 0xF7,
+ HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8,
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 0xF9,
+ HINIC_PORT_CMD_SET_RXQ_LRO_ADPT = 0xFA,
+ HINIC_PORT_CMD_GET_SFP_ABS = 0xFB,
+ HINIC_PORT_CMD_Q_FILTER = 0xFC,
+ HINIC_PORT_CMD_TCAM_FILTER = 0xFE,
+ HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF,
+};
+
+/* cmd of mgmt CPU message for HW module */
+enum hinic_mgmt_cmd {
+ HINIC_MGMT_CMD_RESET_MGMT = 0x0,
+ HINIC_MGMT_CMD_START_FLR = 0x1,
+ HINIC_MGMT_CMD_FLUSH_DOORBELL = 0x2,
+ HINIC_MGMT_CMD_GET_IO_STATUS = 0x3,
+ HINIC_MGMT_CMD_DMA_ATTR_SET = 0x4,
+
+ HINIC_MGMT_CMD_CMDQ_CTXT_SET = 0x10,
+ HINIC_MGMT_CMD_CMDQ_CTXT_GET,
+
+ HINIC_MGMT_CMD_VAT_SET = 0x12,
+ HINIC_MGMT_CMD_VAT_GET,
+
+ HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET = 0x14,
+ HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_GET,
+
+ HINIC_MGMT_CMD_MQM_FIX_INFO_GET = 0x16,
+ HINIC_MGMT_CMD_MQM_CFG_INFO_SET = 0x18,
+ HINIC_MGMT_CMD_MQM_SRCH_GPA_SET = 0x20,
+ HINIC_MGMT_CMD_PPF_TMR_SET = 0x22,
+ HINIC_MGMT_CMD_PPF_HT_GPA_SET = 0x23,
+ HINIC_MGMT_CMD_RES_STATE_SET = 0x24,
+ HINIC_MGMT_CMD_FUNC_CACHE_OUT = 0x25,
+ HINIC_MGMT_CMD_FFM_SET = 0x26,
+ HINIC_MGMT_CMD_SMF_TMR_CLEAR = 0x27,
+ /* 0x29 not defined in base line,
+ * only used in open source driver
+ */
+ HINIC_MGMT_CMD_FUNC_RES_CLEAR = 0x29,
+
+ HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET = 0x32,
+
+ HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
+ HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
+ HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
+
+ HINIC_MGMT_CMD_VF_RANDOM_ID_SET = 0x36,
+ HINIC_MGMT_CMD_FAULT_REPORT = 0x37,
+ HINIC_MGMT_CMD_HEART_LOST_REPORT = 0x38,
+
+ HINIC_MGMT_CMD_VPD_SET = 0x40,
+ HINIC_MGMT_CMD_VPD_GET,
+ HINIC_MGMT_CMD_LABEL_SET,
+ HINIC_MGMT_CMD_LABEL_GET,
+ HINIC_MGMT_CMD_SATIC_MAC_SET,
+ HINIC_MGMT_CMD_SATIC_MAC_GET,
+ HINIC_MGMT_CMD_SYNC_TIME = 0x46,
+
+ HINIC_MGMT_CMD_REG_READ = 0x48,
+
+ HINIC_MGMT_CMD_SET_LED_STATUS = 0x4A,
+ HINIC_MGMT_CMD_L2NIC_RESET = 0x4b,
+ HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET = 0x4d,
+ HINIC_MGMT_CMD_BIOS_NV_DATA_MGMT = 0x4E,
+ HINIC_MGMT_CMD_ACTIVATE_FW = 0x4F,
+ HINIC_MGMT_CMD_PAGESIZE_SET = 0x50,
+ HINIC_MGMT_CMD_PAGESIZE_GET = 0x51,
+ HINIC_MGMT_CMD_GET_BOARD_INFO = 0x52,
+ HINIC_MGMT_CMD_WATCHDOG_INFO = 0x56,
+ HINIC_MGMT_CMD_FMW_ACT_NTC = 0x57,
+ HINIC_MGMT_CMD_SET_VF_RANDOM_ID = 0x61,
+ HINIC_MGMT_CMD_GET_PPF_STATE = 0x63,
+ HINIC_MGMT_CMD_PCIE_DFX_NTC = 0x65,
+ HINIC_MGMT_CMD_PCIE_DFX_GET = 0x66,
+
+ HINIC_MGMT_CMD_GET_HOST_INFO = 0x67,
+
+ HINIC_MGMT_CMD_GET_PHY_INIT_STATUS = 0x6A,
+ HINIC_MGMT_CMD_HEARTBEAT_SUPPORTED = 0x6B,
+ HINIC_MGMT_CMD_HEARTBEAT_EVENT = 0x6C,
+ HINIC_MGMT_CMD_GET_HW_PF_INFOS = 0x6D,
+ HINIC_MGMT_CMD_GET_SDI_MODE = 0x6E,
+
+ HINIC_MGMT_CMD_ENABLE_MIGRATE = 0x6F,
+};
+
+/* uCode related commands */
+enum hinic_ucode_cmd {
+ HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT = 0,
+ HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
+ HINIC_UCODE_CMD_ARM_SQ,
+ HINIC_UCODE_CMD_ARM_RQ,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+ HINIC_UCODE_CMD_GET_RSS_INDIR_TABLE,
+ HINIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE,
+ HINIC_UCODE_CMD_SET_IQ_ENABLE,
+ HINIC_UCODE_CMD_SET_RQ_FLUSH = 10
+};
+
+/* software cmds, vf->pf and multi-host */
+enum hinic_sw_funcs_cmd {
+ HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER = 0x0,
+ HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER = 0x1,
+ HINIC_SW_CMD_GET_SLAVE_FUNC_NIC_STATE = 0x2,
+ HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE = 0x3,
+ HINIC_SW_CMD_SEND_MSG_TO_VF = 0x4,
+ HINIC_SW_CMD_MIGRATE_READY = 0x5,
+};
+
+enum sq_l4offload_type {
+ OFFLOAD_DISABLE = 0,
+ TCP_OFFLOAD_ENABLE = 1,
+ SCTP_OFFLOAD_ENABLE = 2,
+ UDP_OFFLOAD_ENABLE = 3,
+};
+
+enum sq_vlan_offload_flag {
+ VLAN_OFFLOAD_DISABLE = 0,
+ VLAN_OFFLOAD_ENABLE = 1,
+};
+
+enum sq_pkt_parsed_flag {
+ PKT_NOT_PARSED = 0,
+ PKT_PARSED = 1,
+};
+
+enum sq_l3_type {
+ UNKNOWN_L3TYPE = 0,
+ IPV6_PKT = 1,
+ IPV4_PKT_NO_CHKSUM_OFFLOAD = 2,
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3,
+};
+
+enum sq_md_type {
+ UNKNOWN_MD_TYPE = 0,
+};
+
+enum sq_l2type {
+ ETHERNET = 0,
+};
+
+enum sq_tunnel_l4_type {
+ NOT_TUNNEL,
+ TUNNEL_UDP_NO_CSUM,
+ TUNNEL_UDP_CSUM,
+};
+
+#define NIC_RSS_CMD_TEMP_ALLOC 0x01
+#define NIC_RSS_CMD_TEMP_FREE 0x02
+
+#define HINIC_RSS_TYPE_VALID_SHIFT 23
+#define HINIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24
+#define HINIC_RSS_TYPE_IPV6_EXT_SHIFT 25
+#define HINIC_RSS_TYPE_TCP_IPV6_SHIFT 26
+#define HINIC_RSS_TYPE_IPV6_SHIFT 27
+#define HINIC_RSS_TYPE_TCP_IPV4_SHIFT 28
+#define HINIC_RSS_TYPE_IPV4_SHIFT 29
+#define HINIC_RSS_TYPE_UDP_IPV6_SHIFT 30
+#define HINIC_RSS_TYPE_UDP_IPV4_SHIFT 31
+
+#define HINIC_RSS_TYPE_SET(val, member) \
+ (((u32)(val) & 0x1) << HINIC_RSS_TYPE_##member##_SHIFT)
+
+#define HINIC_RSS_TYPE_GET(val, member) \
+ (((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
+
+enum hinic_speed {
+ HINIC_SPEED_10MB_LINK = 0,
+ HINIC_SPEED_100MB_LINK,
+ HINIC_SPEED_1000MB_LINK,
+ HINIC_SPEED_10GB_LINK,
+ HINIC_SPEED_25GB_LINK,
+ HINIC_SPEED_40GB_LINK,
+ HINIC_SPEED_100GB_LINK,
+ HINIC_SPEED_UNKNOWN = 0xFF,
+};
+
+/* In order to adapt different linux version */
+enum {
+ HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
+ HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */
+ HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */
+};
+
+#define HINIC_AF0_FUNC_GLOBAL_IDX_SHIFT 0
+#define HINIC_AF0_P2P_IDX_SHIFT 10
+#define HINIC_AF0_PCI_INTF_IDX_SHIFT 14
+#define HINIC_AF0_VF_IN_PF_SHIFT 16
+#define HINIC_AF0_FUNC_TYPE_SHIFT 24
+
+#define HINIC_AF0_FUNC_GLOBAL_IDX_MASK 0x3FF
+#define HINIC_AF0_P2P_IDX_MASK 0xF
+#define HINIC_AF0_PCI_INTF_IDX_MASK 0x3
+#define HINIC_AF0_VF_IN_PF_MASK 0xFF
+#define HINIC_AF0_FUNC_TYPE_MASK 0x1
+
+#define HINIC_AF0_GET(val, member) \
+ (((val) >> HINIC_AF0_##member##_SHIFT) & HINIC_AF0_##member##_MASK)
+
+#define HINIC_AF1_PPF_IDX_SHIFT 0
+#define HINIC_AF1_AEQS_PER_FUNC_SHIFT 8
+#define HINIC_AF1_CEQS_PER_FUNC_SHIFT 12
+#define HINIC_AF1_IRQS_PER_FUNC_SHIFT 20
+#define HINIC_AF1_DMA_ATTR_PER_FUNC_SHIFT 24
+#define HINIC_AF1_MGMT_INIT_STATUS_SHIFT 30
+#define HINIC_AF1_PF_INIT_STATUS_SHIFT 31
+
+#define HINIC_AF1_PPF_IDX_MASK 0x1F
+#define HINIC_AF1_AEQS_PER_FUNC_MASK 0x3
+#define HINIC_AF1_CEQS_PER_FUNC_MASK 0x7
+#define HINIC_AF1_IRQS_PER_FUNC_MASK 0xF
+#define HINIC_AF1_DMA_ATTR_PER_FUNC_MASK 0x7
+#define HINIC_AF1_MGMT_INIT_STATUS_MASK 0x1
+#define HINIC_AF1_PF_INIT_STATUS_MASK 0x1
+
+#define HINIC_AF1_GET(val, member) \
+ (((val) >> HINIC_AF1_##member##_SHIFT) & HINIC_AF1_##member##_MASK)
+
+#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_SHIFT 16
+#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF
+
+#define HINIC_AF2_GET(val, member) \
+ (((val) >> HINIC_AF2_##member##_SHIFT) & HINIC_AF2_##member##_MASK)
+
+#define HINIC_AF4_OUTBOUND_CTRL_SHIFT 0
+#define HINIC_AF4_DOORBELL_CTRL_SHIFT 1
+#define HINIC_AF4_OUTBOUND_CTRL_MASK 0x1
+#define HINIC_AF4_DOORBELL_CTRL_MASK 0x1
+
+#define HINIC_AF4_GET(val, member) \
+ (((val) >> HINIC_AF4_##member##_SHIFT) & HINIC_AF4_##member##_MASK)
+
+#define HINIC_AF4_SET(val, member) \
+ (((val) & HINIC_AF4_##member##_MASK) << HINIC_AF4_##member##_SHIFT)
+
+#define HINIC_AF4_CLEAR(val, member) \
+ ((val) & (~(HINIC_AF4_##member##_MASK << \
+ HINIC_AF4_##member##_SHIFT)))
+
+#define HINIC_AF5_PF_STATUS_SHIFT 0
+#define HINIC_AF5_PF_STATUS_MASK 0xFFFF
+
+#define HINIC_AF5_SET(val, member) \
+ (((val) & HINIC_AF5_##member##_MASK) << HINIC_AF5_##member##_SHIFT)
+
+#define HINIC_AF5_GET(val, member) \
+ (((val) >> HINIC_AF5_##member##_SHIFT) & HINIC_AF5_##member##_MASK)
+
+#define HINIC_AF5_CLEAR(val, member) \
+ ((val) & (~(HINIC_AF5_##member##_MASK << \
+ HINIC_AF5_##member##_SHIFT)))
+
+#define HINIC_PPF_ELECTION_IDX_SHIFT 0
+
+#define HINIC_PPF_ELECTION_IDX_MASK 0x1F
+
+#define HINIC_PPF_ELECTION_SET(val, member) \
+ (((val) & HINIC_PPF_ELECTION_##member##_MASK) << \
+ HINIC_PPF_ELECTION_##member##_SHIFT)
+
+#define HINIC_PPF_ELECTION_GET(val, member) \
+ (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \
+ HINIC_PPF_ELECTION_##member##_MASK)
+
+#define HINIC_PPF_ELECTION_CLEAR(val, member) \
+ ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \
+ << HINIC_PPF_ELECTION_##member##_SHIFT)))
+
+#define HINIC_MPF_ELECTION_IDX_SHIFT 0
+
+#define HINIC_MPF_ELECTION_IDX_MASK 0x1F
+
+#define HINIC_MPF_ELECTION_SET(val, member) \
+ (((val) & HINIC_MPF_ELECTION_##member##_MASK) << \
+ HINIC_MPF_ELECTION_##member##_SHIFT)
+
+#define HINIC_MPF_ELECTION_GET(val, member) \
+ (((val) >> HINIC_MPF_ELECTION_##member##_SHIFT) & \
+ HINIC_MPF_ELECTION_##member##_MASK)
+
+#define HINIC_MPF_ELECTION_CLEAR(val, member) \
+ ((val) & (~(HINIC_MPF_ELECTION_##member##_MASK \
+ << HINIC_MPF_ELECTION_##member##_SHIFT)))
+
+#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs)
+#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs)
+#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs)
+#define HINIC_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx)
+#define HINIC_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf)
+#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
+#define HINIC_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx)
+
+#define HINIC_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type)
+#define HINIC_IS_PF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PF)
+#define HINIC_IS_VF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_VF)
+#define HINIC_IS_PPF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PPF)
+
+#define DB_IDX(db, db_base) \
+ ((u32)(((ulong)(db) - (ulong)(db_base)) / \
+ HINIC_DB_PAGE_SIZE))
+
+enum hinic_pcie_nosnoop {
+ HINIC_PCIE_SNOOP = 0,
+ HINIC_PCIE_NO_SNOOP = 1,
+};
+
+enum hinic_pcie_tph {
+ HINIC_PCIE_TPH_DISABLE = 0,
+ HINIC_PCIE_TPH_ENABLE = 1,
+};
+
+enum hinic_outbound_ctrl {
+ ENABLE_OUTBOUND = 0x0,
+ DISABLE_OUTBOUND = 0x1,
+};
+
+enum hinic_doorbell_ctrl {
+ ENABLE_DOORBELL = 0x0,
+ DISABLE_DOORBELL = 0x1,
+};
+
+enum hinic_pf_status {
+ HINIC_PF_STATUS_INIT = 0x0,
+ HINIC_PF_STATUS_ACTIVE_FLAG = 0x11,
+ HINIC_PF_STATUS_FLR_START_FLAG = 0x12,
+ HINIC_PF_STATUS_FLR_FINISH_FLAG = 0x13,
+};
+
+/* total doorbell or direct wqe size is 512kB, db num: 128, dwqe: 128 */
+#define HINIC_DB_DWQE_SIZE 0x00080000
+/* BMGW & VMGW VF db size 256k, have no dwqe space */
+#define HINIC_GW_VF_DB_SIZE 0x00040000
+
+/* db/dwqe page size: 4K */
+#define HINIC_DB_PAGE_SIZE 0x00001000ULL
+
+#define HINIC_DB_MAX_AREAS (HINIC_DB_DWQE_SIZE / HINIC_DB_PAGE_SIZE)
+
+#define HINIC_PCI_MSIX_ENTRY_SIZE 16
+#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+
+#endif /* __HINIC_PORT_CMD_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h b/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h
new file mode 100644
index 000000000000..c58c1cf4b38f
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h
@@ -0,0 +1,459 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __HINIC_QE_DEF_H__
+#define __HINIC_QE_DEF_H__
+
+#define HINIC_SQ_WQEBB_SIZE 64
+#define HINIC_RQ_WQE_SIZE 32
+#define HINIC_SQ_WQEBB_SHIFT 6
+#define HINIC_RQ_WQEBB_SHIFT 5
+
+#define HINIC_MAX_QUEUE_DEPTH 4096
+#define HINIC_MIN_QUEUE_DEPTH 128
+#define HINIC_TXD_ALIGN 1
+#define HINIC_RXD_ALIGN 1
+
+#define HINIC_SQ_DEPTH 1024
+#define HINIC_RQ_DEPTH 1024
+
+#define HINIC_RQ_WQE_MAX_SIZE 32
+
+#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8) >> 3)//lint !e767
+
+/************** SQ_CTRL ***************/
+#define SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
+#define SQ_CTRL_TASKSECT_LEN_SHIFT 16
+#define SQ_CTRL_DATA_FORMAT_SHIFT 22
+#define SQ_CTRL_LEN_SHIFT 29
+#define SQ_CTRL_OWNER_SHIFT 31
+
+#define SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU
+#define SQ_CTRL_TASKSECT_LEN_MASK 0x1FU
+#define SQ_CTRL_DATA_FORMAT_MASK 0x1U
+#define SQ_CTRL_LEN_MASK 0x3U
+#define SQ_CTRL_OWNER_MASK 0x1U
+
+#define SQ_CTRL_GET(val, member) (((val) >> SQ_CTRL_##member##_SHIFT) \
+ & SQ_CTRL_##member##_MASK)
+
+#define SQ_CTRL_CLEAR(val, member) ((val) & \
+ (~(SQ_CTRL_##member##_MASK << \
+ SQ_CTRL_##member##_SHIFT)))
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2
+#define SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10
+#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11
+#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12
+#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
+#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27
+#define SQ_CTRL_QUEUE_INFO_UC_SHIFT 28
+#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFFU
+#define SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFFU
+#define SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_UC_MASK 0x1U
+#define SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7U
+
+#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
+ (((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) \
+ << SQ_CTRL_QUEUE_INFO_##member##_SHIFT)
+
+#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
+ (((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) \
+ & SQ_CTRL_QUEUE_INFO_##member##_MASK)
+
+#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member) \
+ ((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK << \
+ SQ_CTRL_QUEUE_INFO_##member##_SHIFT)))
+
+#define SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0
+#define SQ_TASK_INFO0_L4OFFLOAD_SHIFT 8
+#define SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10
+#define SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12
+#define SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13
+#define SQ_TASK_INFO0_UFO_AVD_SHIFT 14
+#define SQ_TASK_INFO0_TSO_UFO_SHIFT 15
+#define SQ_TASK_INFO0_VLAN_TAG_SHIFT 16
+
+#define SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFFU
+#define SQ_TASK_INFO0_L4OFFLOAD_MASK 0x3U
+#define SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3U
+#define SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1U
+#define SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1U
+#define SQ_TASK_INFO0_UFO_AVD_MASK 0x1U
+#define SQ_TASK_INFO0_TSO_UFO_MASK 0x1U
+#define SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFFU
+
+#define SQ_TASK_INFO0_SET(val, member) \
+ (((u32)(val) & SQ_TASK_INFO0_##member##_MASK) << \
+ SQ_TASK_INFO0_##member##_SHIFT)
+#define SQ_TASK_INFO0_GET(val, member) \
+ (((val) >> SQ_TASK_INFO0_##member##_SHIFT) & \
+ SQ_TASK_INFO0_##member##_MASK)
+
+#define SQ_TASK_INFO1_MD_TYPE_SHIFT 8
+#define SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16
+#define SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24
+
+#define SQ_TASK_INFO1_MD_TYPE_MASK 0xFFU
+#define SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFFU
+#define SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFFU
+
+#define SQ_TASK_INFO1_SET(val, member) \
+ (((val) & SQ_TASK_INFO1_##member##_MASK) << \
+ SQ_TASK_INFO1_##member##_SHIFT)
+#define SQ_TASK_INFO1_GET(val, member) \
+ (((val) >> SQ_TASK_INFO1_##member##_SHIFT) & \
+ SQ_TASK_INFO1_##member##_MASK)
+
+#define SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0
+#define SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8
+#define SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16
+#define SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24
+
+#define SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFFU
+#define SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFFU
+#define SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7U
+#define SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3U
+
+#define SQ_TASK_INFO2_SET(val, member) \
+ (((val) & SQ_TASK_INFO2_##member##_MASK) << \
+ SQ_TASK_INFO2_##member##_SHIFT)
+#define SQ_TASK_INFO2_GET(val, member) \
+ (((val) >> SQ_TASK_INFO2_##member##_SHIFT) & \
+ SQ_TASK_INFO2_##member##_MASK)
+
+#define SQ_TASK_INFO4_L2TYPE_SHIFT 31
+
+#define SQ_TASK_INFO4_L2TYPE_MASK 0x1U
+
+#define SQ_TASK_INFO4_SET(val, member) \
+ (((u32)(val) & SQ_TASK_INFO4_##member##_MASK) << \
+ SQ_TASK_INFO4_##member##_SHIFT)
+
+/********************* SQ_DB *********************/
+#define SQ_DB_OFF 0x00000800
+#define SQ_DB_INFO_HI_PI_SHIFT 0
+#define SQ_DB_INFO_QID_SHIFT 8
+#define SQ_DB_INFO_CFLAG_SHIFT 23
+#define SQ_DB_INFO_COS_SHIFT 24
+#define SQ_DB_INFO_TYPE_SHIFT 27
+#define SQ_DB_INFO_HI_PI_MASK 0xFFU
+#define SQ_DB_INFO_QID_MASK 0x3FFU
+#define SQ_DB_INFO_CFLAG_MASK 0x1U
+#define SQ_DB_INFO_COS_MASK 0x7U
+#define SQ_DB_INFO_TYPE_MASK 0x1FU
+#define SQ_DB_INFO_SET(val, member) \
+ (((u32)(val) & SQ_DB_INFO_##member##_MASK) << \
+ SQ_DB_INFO_##member##_SHIFT)
+
+#define SQ_DB_PI_LOW_MASK 0xFF
+#define SQ_DB_PI_LOW(pi) ((pi) & SQ_DB_PI_LOW_MASK)
+#define SQ_DB_PI_HI_SHIFT 8
+#define SQ_DB_PI_HIGH(pi) ((pi) >> SQ_DB_PI_HI_SHIFT)
+#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_addr + SQ_DB_OFF) + \
+ SQ_DB_PI_LOW(pi))
+#define SQ_DB 1
+#define SQ_CFLAG_DP 0 /* CFLAG_DATA_PATH */
+
+/*********************** RQ_CTRL ******************/
+#define RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
+#define RQ_CTRL_COMPLETE_FORMAT_SHIFT 15
+#define RQ_CTRL_COMPLETE_LEN_SHIFT 27
+#define RQ_CTRL_LEN_SHIFT 29
+
+#define RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU
+#define RQ_CTRL_COMPLETE_FORMAT_MASK 0x1U
+#define RQ_CTRL_COMPLETE_LEN_MASK 0x3U
+#define RQ_CTRL_LEN_MASK 0x3U
+
+#define RQ_CTRL_SET(val, member) \
+ (((val) & RQ_CTRL_##member##_MASK) << \
+ RQ_CTRL_##member##_SHIFT)
+
+#define RQ_CTRL_GET(val, member) \
+ (((val) >> RQ_CTRL_##member##_SHIFT) & \
+ RQ_CTRL_##member##_MASK)
+
+#define RQ_CTRL_CLEAR(val, member) \
+ ((val) & (~(RQ_CTRL_##member##_MASK << \
+ RQ_CTRL_##member##_SHIFT)))
+
+#define RQ_CQE_STATUS_CSUM_ERR_SHIFT 0
+#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16
+#define RQ_CQE_STATUS_LRO_PUSH_SHIFT 25
+#define RQ_CQE_STATUS_LRO_ENTER_SHIFT 26
+#define RQ_CQE_STATUS_LRO_INTR_SHIFT 27
+
+#define RQ_CQE_STATUS_BP_EN_SHIFT 30
+#define RQ_CQE_STATUS_RXDONE_SHIFT 31
+#define RQ_CQE_STATUS_FLUSH_SHIFT 28
+
+#define RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU
+#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU
+#define RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U
+#define RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U
+#define RQ_CQE_STATUS_LRO_INTR_MASK 0X1U
+#define RQ_CQE_STATUS_BP_EN_MASK 0X1U
+#define RQ_CQE_STATUS_RXDONE_MASK 0x1U
+#define RQ_CQE_STATUS_FLUSH_MASK 0x1U
+
+#define RQ_CQE_STATUS_GET(val, member) \
+ (((val) >> RQ_CQE_STATUS_##member##_SHIFT) & \
+ RQ_CQE_STATUS_##member##_MASK)
+
+#define RQ_CQE_STATUS_CLEAR(val, member) \
+ ((val) & (~(RQ_CQE_STATUS_##member##_MASK << \
+ RQ_CQE_STATUS_##member##_SHIFT)))
+
+#define RQ_CQE_SGE_VLAN_SHIFT 0
+#define RQ_CQE_SGE_LEN_SHIFT 16
+
+#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU
+#define RQ_CQE_SGE_LEN_MASK 0xFFFFU
+
+#define RQ_CQE_SGE_GET(val, member) \
+ (((val) >> RQ_CQE_SGE_##member##_SHIFT) & \
+ RQ_CQE_SGE_##member##_MASK)
+
+#define RQ_CQE_PKT_NUM_SHIFT 1
+#define RQ_CQE_PKT_FIRST_LEN_SHIFT 19
+#define RQ_CQE_PKT_LAST_LEN_SHIFT 6
+#define RQ_CQE_SUPER_CQE_EN_SHIFT 0
+
+#define RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU
+#define RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU
+#define RQ_CQE_PKT_NUM_MASK 0x1FU
+#define RQ_CQE_SUPER_CQE_EN_MASK 0x1
+
+#define RQ_CQE_PKT_NUM_GET(val, member) \
+ (((val) >> RQ_CQE_PKT_##member##_SHIFT) & \
+ RQ_CQE_PKT_##member##_MASK)
+#define HINIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM)
+
+#define RQ_CQE_SUPER_CQE_EN_GET(val, member) \
+ (((val) >> RQ_CQE_##member##_SHIFT) & \
+ RQ_CQE_##member##_MASK)
+#define HINIC_GET_SUPER_CQE_EN(pkt_info) \
+ RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN)
+
+#define HINIC_GET_SUPER_CQE_EN_BE(pkt_info) ((pkt_info) & 0x1000000U)
+#define RQ_CQE_PKT_LEN_GET(val, member) \
+ (((val) >> RQ_CQE_PKT_##member##_SHIFT) & \
+ RQ_CQE_PKT_##member##_MASK)
+
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0xFFFU
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19
+#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U
+
+#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24
+#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU
+
+#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \
+ (((val) >> RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \
+ RQ_CQE_OFFOLAD_TYPE_##member##_MASK)
+
+#define RQ_CQE_PKT_TYPES_NON_L2_MASK 0x800U
+#define RQ_CQE_PKT_TYPES_L2_MASK 0x7FU
+
+#define RQ_CQE_STATUS_CSUM_BYPASS_VAL 0x80
+#define RQ_CQE_STATUS_CSUM_ERR_IP_MASK 0x31U
+#define RQ_CQE_STATUS_CSUM_ERR_L4_MASK 0x4EU
+
+#define SECT_SIZE_BYTES(size) ((size) << 3)
+
+#define HINIC_PF_SET_VF_ALREADY 0x4
+#define HINIC_MGMT_STATUS_EXIST 0x6
+
+#define WQS_BLOCKS_PER_PAGE 4
+
+#define WQ_SIZE(wq) ((u32)((u64)(wq)->q_depth * (wq)->wqebb_size))
+
+#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \
+ ((wq)->num_q_pages - 1))
+
+#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \
+ ((idx) & ((wq)->num_wqebbs_per_page - 1)))
+
+#define WQ_PAGE_ADDR_SIZE sizeof(u64)
+#define WQ_PAGE_ADDR_SIZE_SHIFT 3
+#define WQ_PAGE_ADDR(wq, idx) \
+ ((u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \
+ (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT))))
+
+#define WQ_BLOCK_SIZE 4096UL
+#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
+#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT)
+
+#define CMDQ_BLOCKS_PER_PAGE 8
+#define CMDQ_BLOCK_SIZE 512UL
+#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \
+ CMDQ_BLOCK_SIZE), PAGE_SIZE)
+
+#define ADDR_4K_ALIGNED(addr) (0 == ((addr) & 0xfff))
+#define ADDR_256K_ALIGNED(addr) (0 == ((addr) & 0x3ffff))
+
+#define WQ_BASE_VADDR(wqs, wq) \
+ ((u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE))
+
+#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \
+ + (u64)(wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define WQ_BASE_ADDR(wqs, wq) \
+ ((u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE))
+
+#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
+ ((u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE))
+
+#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
+ (((u64)((cmdq_pages)->cmdq_page_paddr)) \
+ + (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
+ ((u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \
+ + (wq)->block_idx * CMDQ_BLOCK_SIZE))
+
+#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
+
+#define WQE_SHADOW_PAGE(wq, wqe) \
+ ((u16)(((ulong)(wqe) - (ulong)(wq)->shadow_wqe) \
+ / (wq)->max_wqe_size))
+
+#define WQE_IN_RANGE(wqe, start, end) \
+ (((ulong)(wqe) >= (ulong)(start)) && \
+ ((ulong)(wqe) < (ulong)(end)))
+
+#define WQ_NUM_PAGES(num_wqs) \
+ (ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE)
+
+/* Qe buffer related defines */
+enum hinic_rx_buf_size {
+ HINIC_RX_BUF_SIZE_32B = 0x20,
+ HINIC_RX_BUF_SIZE_64B = 0x40,
+ HINIC_RX_BUF_SIZE_96B = 0x60,
+ HINIC_RX_BUF_SIZE_128B = 0x80,
+ HINIC_RX_BUF_SIZE_192B = 0xC0,
+ HINIC_RX_BUF_SIZE_256B = 0x100,
+ HINIC_RX_BUF_SIZE_384B = 0x180,
+ HINIC_RX_BUF_SIZE_512B = 0x200,
+ HINIC_RX_BUF_SIZE_768B = 0x300,
+ HINIC_RX_BUF_SIZE_1K = 0x400,
+ HINIC_RX_BUF_SIZE_1_5K = 0x600,
+ HINIC_RX_BUF_SIZE_2K = 0x800,
+ HINIC_RX_BUF_SIZE_3K = 0xC00,
+ HINIC_RX_BUF_SIZE_4K = 0x1000,
+ HINIC_RX_BUF_SIZE_8K = 0x2000,
+ HINIC_RX_BUF_SIZE_16K = 0x4000,
+};
+
+enum ppf_tmr_status {
+ HINIC_PPF_TMR_FLAG_STOP,
+ HINIC_PPF_TMR_FLAG_START,
+};
+
+enum hinic_res_state {
+ HINIC_RES_CLEAN = 0,
+ HINIC_RES_ACTIVE = 1,
+};
+
+#define DEFAULT_RX_BUF_SIZE ((u16)0xB)
+
+#define BUF_DESC_SIZE_SHIFT 4
+
+#define HINIC_SQ_WQE_SIZE(num_sge) \
+ (sizeof(struct hinic_sq_ctrl) + \
+ sizeof(struct hinic_sq_task) + \
+ (u32)((num_sge) << BUF_DESC_SIZE_SHIFT))
+
+#define HINIC_SQ_WQEBB_CNT(num_sge) \
+ ((int)(ALIGN(HINIC_SQ_WQE_SIZE((u32)num_sge), \
+ HINIC_SQ_WQEBB_SIZE) >> HINIC_SQ_WQEBB_SHIFT))
+
+#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN)
+
+#define HINIC_GET_RSS_TYPES(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE)
+
+#define HINIC_GET_PKT_TYPES(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)
+
+#define HINIC_GET_RX_PKT_TYPE(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)
+
+#define HINIC_GET_RX_PKT_UMBCAST(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST)
+
+#define HINIC_GET_RX_VLAN_TAG(vlan_len) \
+ RQ_CQE_SGE_GET(vlan_len, VLAN)
+
+#define HINIC_GET_RX_PKT_LEN(vlan_len) \
+ RQ_CQE_SGE_GET(vlan_len, LEN)
+
+#define HINIC_GET_RX_CSUM_ERR(status) \
+ RQ_CQE_STATUS_GET(status, CSUM_ERR)
+
+#define HINIC_GET_RX_DONE(status) \
+ RQ_CQE_STATUS_GET(status, RXDONE)
+
+#define HINIC_GET_RX_FLUSH(status) \
+ RQ_CQE_STATUS_GET(status, FLUSH)
+
+#define HINIC_GET_RX_BP_EN(status) \
+ RQ_CQE_STATUS_GET(status, BP_EN)
+
+#define HINIC_GET_RX_NUM_LRO(status) \
+ RQ_CQE_STATUS_GET(status, NUM_LRO)
+
+#define HINIC_PKT_TYPES_UNKNOWN(pkt_types) \
+ ((pkt_types) & RQ_CQE_PKT_TYPES_NON_L2_MASK)
+
+#define HINIC_PKT_TYPES_L2(pkt_types) \
+ ((pkt_types) & RQ_CQE_PKT_TYPES_L2_MASK)
+
+#define HINIC_CSUM_ERR_BYPASSED(csum_err) \
+ ((csum_err) == RQ_CQE_STATUS_CSUM_BYPASS_VAL)
+
+#define HINIC_CSUM_ERR_IP(csum_err) \
+ ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_IP_MASK)
+
+#define HINIC_CSUM_ERR_L4(csum_err) \
+ ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_L4_MASK)
+
+#define TX_MSS_DEFAULT 0x3E00
+#define TX_MSS_MIN 0x50
+
+enum sq_wqe_type {
+ SQ_NORMAL_WQE = 0,
+};
+
+enum rq_completion_fmt {
+ RQ_COMPLETE_SGE = 1
+};
+
+#endif /* __HINIC_QE_DEF_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_qp.c
new file mode 100644
index 000000000000..c61df2f96283
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_qp.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include "hinic_nic_io.h"
+#include "hinic_qp.h"
+
+#define BUF_DESC_SHIFT 1
+#define BUF_DESC_SIZE(nr_descs) (((u32)nr_descs) << BUF_DESC_SHIFT)
+
+/**
+ * hinic_prepare_sq_ctrl - fill the control section of an SQ WQE
+ * @ctrl: control section to initialize (written in big-endian for HW)
+ * @queue_info: host-order queue_info bits accumulated by the caller
+ * @nr_descs: number of buffer descriptors in the WQE
+ * @owner: ownership bit for the current WQ wrap
+ *
+ * Sizes are expressed in 8-byte units as required by the hardware.  If the
+ * caller did not set an MSS, the default is programmed; an MSS below the
+ * hardware minimum (TX_MSS_MIN, 80 bytes) is raised to the minimum.
+ *
+ * Fix: the values are built in host order and written for the device, so the
+ * correct conversion is cpu_to_be32(), not be32_to_cpu().  Both swap bytes
+ * identically on any real CPU, so behavior is unchanged, but the direction
+ * now matches intent and sparse __be32 annotation rules.
+ */
+void hinic_prepare_sq_ctrl(struct hinic_sq_ctrl *ctrl, u32 queue_info,
+			   int nr_descs, u8 owner)
+{
+	u32 ctrl_size, task_size, bufdesc_size;
+
+	ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
+	task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
+	bufdesc_size = BUF_DESC_SIZE(nr_descs);
+
+	ctrl->ctrl_fmt = SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
+			SQ_CTRL_SET(task_size, TASKSECT_LEN) |
+			SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+			SQ_CTRL_SET(ctrl_size, LEN) |
+			SQ_CTRL_SET(owner, OWNER);
+
+	ctrl->ctrl_fmt = cpu_to_be32(ctrl->ctrl_fmt);
+
+	ctrl->queue_info = queue_info;
+	ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC);
+
+	if (!SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS)) {
+		ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS);
+	} else if (SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS) < TX_MSS_MIN) {
+		/* mss should not be less than 80 */
+		ctrl->queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(ctrl->queue_info,
+							    MSS);
+		ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS);
+	}
+	ctrl->queue_info = cpu_to_be32(ctrl->queue_info);
+}
+
+/**
+ * hinic_get_rx_done - test the RXDONE bit of an RQ CQE
+ * @cqe: CQE written by hardware (fields are big-endian)
+ *
+ * Return: 1 if hardware has completed this descriptor, 0 otherwise.
+ */
+int hinic_get_rx_done(struct hinic_rq_cqe *cqe)
+{
+	u32 cqe_status = be32_to_cpu(cqe->status);
+
+	return RQ_CQE_STATUS_GET(cqe_status, RXDONE) ? 1 : 0;
+}
+
+/**
+ * hinic_clear_rx_done - clear the RXDONE bit so the CQE can be reused
+ * @cqe: CQE to rewrite (stored big-endian for hardware)
+ * @status_old: previously read host-order status value
+ */
+void hinic_clear_rx_done(struct hinic_rq_cqe *cqe, u32 status_old)
+{
+	u32 status;
+
+	status = RQ_CQE_STATUS_CLEAR(status_old, RXDONE);
+
+	cqe->status = cpu_to_be32(status);
+
+	/* Make sure the cleared RXDONE is visible before the CQE is reused */
+	wmb();
+}
+
+/**
+ * hinic_get_super_cqe_en - test the SUPER_CQE_EN bit of an RQ CQE
+ * @cqe: CQE written by hardware (fields are big-endian)
+ *
+ * Return: 1 if this is a super CQE (multiple packets reported), 0 otherwise.
+ */
+int hinic_get_super_cqe_en(struct hinic_rq_cqe *cqe)
+{
+	u32 cqe_pkt_info = be32_to_cpu(cqe->pkt_info);
+
+	return RQ_CQE_SUPER_CQE_EN_GET(cqe_pkt_info, SUPER_CQE_EN) ? 1 : 0;
+}
+
+/**
+ * hinic_get_pkt_len - extract the packet length from an RQ CQE
+ * @cqe: CQE written by hardware (fields are big-endian)
+ *
+ * Return: packet length in bytes from the vlan_len field.
+ */
+u32 hinic_get_pkt_len(struct hinic_rq_cqe *cqe)
+{
+	u32 vlan_len = be32_to_cpu(cqe->vlan_len);
+
+	return RQ_CQE_SGE_GET(vlan_len, LEN);
+}
+
+/**
+ * hinic_get_pkt_num - number of packets reported by a (super) CQE
+ * @cqe: CQE written by hardware (fields are big-endian)
+ *
+ * Return: packet count encoded in the pkt_info field.
+ */
+u32 hinic_get_pkt_num(struct hinic_rq_cqe *cqe)
+{
+	u32 pkt_num = be32_to_cpu(cqe->pkt_info);
+
+	return RQ_CQE_PKT_NUM_GET(pkt_num, NUM);
+}
+
+/**
+ * hinic_get_pkt_len_for_super_cqe - per-packet length within a super CQE
+ * @cqe: CQE written by hardware (fields are big-endian)
+ * @last: true for the final packet of the super CQE, false otherwise
+ *
+ * A super CQE encodes two lengths: one for all leading packets and one for
+ * the trailing packet.
+ */
+u32 hinic_get_pkt_len_for_super_cqe(struct hinic_rq_cqe *cqe,
+				    bool last)
+{
+	u32 cqe_pkt_info = be32_to_cpu(cqe->pkt_info);
+
+	if (last)
+		return RQ_CQE_PKT_LEN_GET(cqe_pkt_info, LAST_LEN);
+
+	return RQ_CQE_PKT_LEN_GET(cqe_pkt_info, FIRST_LEN);
+}
+
+/**
+ * hinic_prepare_rq_wqe - initialize an RQ WQE (control, CQE section, bufdesc)
+ * @wqe: receive WQE to fill (host order; caller byte-swaps for HW)
+ * @pi: producer index of this WQE; currently unused in this function
+ * @buf_addr: DMA address of the receive buffer
+ * @cqe_dma: DMA address where HW writes the completion (CQE)
+ *
+ * Section lengths are expressed in 8-byte units as the hardware expects.
+ */
+void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr,
+			  dma_addr_t cqe_dma)
+{
+	struct hinic_rq_wqe *rq_wqe = (struct hinic_rq_wqe *)wqe;
+	struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
+	struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
+	struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
+	u32 rq_ceq_len = sizeof(struct hinic_rq_cqe);
+
+	ctrl->ctrl_fmt =
+		RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
+		RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |
+		RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |
+		RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
+
+	hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len);
+
+	buf_desc->addr_high = upper_32_bits(buf_addr);
+	buf_desc->addr_low = lower_32_bits(buf_addr);
+}
+
+/**
+ * hinic_set_cs_inner_l4 - enable inner L4 checksum offload for a TX packet
+ * @task: task section of the SQ WQE (host order; caller byte-swaps for HW)
+ * @queue_info: queue_info bits being accumulated for the SQ control section
+ * @l4_offload: L4 protocol offload type (TCP/UDP/SCTP)
+ * @l4_len: inner L4 header length
+ * @offset: payload offset for the hardware checksum engine
+ */
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
+			   u32 *queue_info,
+			   enum sq_l4offload_type l4_offload,
+			   u32 l4_len, u32 offset)
+{
+	u32 tcp_udp_cs = 0, sctp = 0;
+	u32 mss = TX_MSS_DEFAULT;
+
+	/* tcp_udp_cs must be set to calculate the outer checksum for VXLAN
+	 * packets without inner L3 and L4 headers
+	 */
+	if (unlikely(l4_offload == SCTP_OFFLOAD_ENABLE))
+		sctp = 1;
+	else
+		tcp_udp_cs = 1;
+
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(l4_offload, L4OFFLOAD);
+	task->pkt_info1 |= SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset, PLDOFF) |
+		       SQ_CTRL_QUEUE_INFO_SET(tcp_udp_cs, TCPUDP_CS) |
+		       SQ_CTRL_QUEUE_INFO_SET(sctp, SCTP);
+
+	/* non-TSO path: force the default MSS into queue_info */
+	*queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+}
+
+/**
+ * hinic_set_tso_inner_l4 - enable TSO/UFO segmentation for a TX packet
+ * @task: task section of the SQ WQE (host order; caller byte-swaps for HW)
+ * @queue_info: queue_info bits being accumulated for the SQ control section
+ * @l4_offload: L4 protocol offload type (TCP enables TSO, UDP enables UFO)
+ * @l4_len: inner L4 header length
+ * @offset: payload offset for the hardware engine
+ * @ip_ident: IP identification seed written big-endian for the device
+ * @mss: segment size to program (replaces any MSS set earlier)
+ *
+ * Fix: @ip_ident is a host-order value being stored for the device, so the
+ * correct conversion is cpu_to_be32(), not be32_to_cpu().  The byte result
+ * is identical on any real CPU; the direction now matches intent and sparse
+ * __be32 annotation rules.
+ */
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
+			    u32 *queue_info,
+			    enum sq_l4offload_type l4_offload,
+			    u32 l4_len,
+			    u32 offset, u32 ip_ident, u32 mss)
+{
+	u32 tso = 0, ufo = 0;
+
+	if (l4_offload == TCP_OFFLOAD_ENABLE)
+		tso = 1;
+	else if (l4_offload == UDP_OFFLOAD_ENABLE)
+		ufo = 1;
+
+	task->ufo_v6_identify = cpu_to_be32(ip_ident);
+
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(l4_offload, L4OFFLOAD);
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(tso || ufo, TSO_UFO);
+	task->pkt_info1 |= SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+	/* checksum must be calculated by hw if tso is enabled */
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset, PLDOFF) |
+		       SQ_CTRL_QUEUE_INFO_SET(tso, TSO) |
+		       SQ_CTRL_QUEUE_INFO_SET(ufo, UFO) |
+		       SQ_CTRL_QUEUE_INFO_SET(!!l4_offload, TCPUDP_CS);
+
+	/* qsf was initialized in prepare_sq_wqe; replace its MSS */
+	*queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+}
+
+/**
+ * hinic_set_vlan_tx_offload - request hardware VLAN tag insertion on TX
+ * @task: task section of the SQ WQE (host order; caller byte-swaps for HW)
+ * @queue_info: queue_info bits being accumulated for the SQ control section
+ * @vlan_tag: VLAN ID to insert
+ * @vlan_pri: VLAN priority (PCP) bits
+ */
+void hinic_set_vlan_tx_offload(struct hinic_sq_task *task,
+			       u32 *queue_info,
+			       u16 vlan_tag, u16 vlan_pri)
+{
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
+			   SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);
+
+	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(vlan_pri, PRI);
+}
+
+/**
+ * hinic_task_set_tx_offload_valid - record the L2 header length, marking
+ * the task-section offload fields as valid for hardware
+ * @task: task section of the SQ WQE (host order; caller byte-swaps for HW)
+ * @l2hdr_len: Ethernet (L2) header length in bytes
+ */
+void hinic_task_set_tx_offload_valid(struct hinic_sq_task *task, u32 l2hdr_len)
+{
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(l2hdr_len, L2HDR_LEN);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_qp.h
new file mode 100644
index 000000000000..005aaf130440
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_qp.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0*/
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_QP_H
+#define HINIC_QP_H
+
+#include "hinic_qe_def.h"
+#include "hinic_port_cmd.h"
+
+/* frags and linner */
+#define HINIC_MAX_SQ_BUFDESCS (MAX_SKB_FRAGS + 1)
+#define HINIC_MAX_SQ_SGE 17
+#define HINIC_MAX_SKB_NR_FRAGE (HINIC_MAX_SQ_SGE - 1)
+#define HINIC_GSO_MAX_SIZE 65536
+
+/* SQ WQE control section; both words are byte-swapped to big-endian
+ * before the WQE is handed to hardware
+ */
+struct hinic_sq_ctrl {
+	u32 ctrl_fmt;
+	u32 queue_info;
+};
+
+/* SQ WQE task section carrying offload metadata (checksum/TSO/VLAN).
+ * NOTE(review): field layout presumably mirrors the hardware descriptor —
+ * confirm against the SQ_TASK_INFO*_SET bit definitions in hinic_qe_def.h.
+ */
+struct hinic_sq_task {
+	u32 pkt_info0;
+	u32 pkt_info1;
+	u32 pkt_info2;
+	u32 ufo_v6_identify;	/* IP identification seed for UFO (big-endian) */
+	u32 pkt_info4;
+	u32 rsvd5;
+};
+
+/* One TX scatter-gather buffer descriptor (64-bit DMA address + length) */
+struct hinic_sq_bufdesc {
+	u32 hi_addr;
+	u32 lo_addr;
+	u32 len;
+	u32 rsvd;
+};
+
+/* Complete send WQE: control + task + up to HINIC_MAX_SQ_BUFDESCS buffers */
+struct hinic_sq_wqe {
+	struct hinic_sq_ctrl ctrl;
+	struct hinic_sq_task task;
+	struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS];
+};
+
+/* RQ WQE control section */
+struct hinic_rq_ctrl {
+	u32 ctrl_fmt;
+};
+
+/* Receive completion entry written by hardware; all fields big-endian */
+struct hinic_rq_cqe {
+	u32 status;		/* RXDONE/FLUSH/CSUM_ERR bits */
+	u32 vlan_len;		/* packet length and VLAN tag */
+
+	u32 offload_type;	/* packet type / RSS / VLAN-offload bits */
+	u32 hash_val;
+	u32 rsvd4;
+	u32 rsvd5;
+	u32 rsvd6;
+	u32 pkt_info;		/* super-CQE packet count and lengths */
+};
+
+/* Where hardware should DMA the completion (CQE) for an RQ WQE */
+struct hinic_rq_cqe_sect {
+	struct hinic_sge sge;
+	u32 rsvd;
+};
+
+/* One RX buffer descriptor (64-bit DMA address) */
+struct hinic_rq_bufdesc {
+	u32 addr_high;
+	u32 addr_low;
+};
+
+/* Complete receive WQE: control + CQE section + single buffer descriptor */
+struct hinic_rq_wqe {
+	struct hinic_rq_ctrl ctrl;
+	u32 rsvd;
+	struct hinic_rq_cqe_sect cqe_sect;
+	struct hinic_rq_bufdesc buf_desc;
+};
+
+void hinic_prepare_sq_ctrl(struct hinic_sq_ctrl *ctrl, u32 queue_info,
+ int nr_descs, u8 owner);
+
+u32 hinic_get_pkt_len(struct hinic_rq_cqe *cqe);
+
+int hinic_get_super_cqe_en(struct hinic_rq_cqe *cqe);
+
+u32 hinic_get_pkt_len_for_super_cqe(struct hinic_rq_cqe *cqe, bool last);
+
+u32 hinic_get_pkt_num(struct hinic_rq_cqe *cqe);
+
+int hinic_get_rx_done(struct hinic_rq_cqe *cqe);
+
+void hinic_clear_rx_done(struct hinic_rq_cqe *cqe, u32 status_old);
+
+void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr,
+ dma_addr_t cqe_dma);
+
+/* Record outer L3 (IP) type and header length in the task section.
+ * ("outter" spelling kept: the name is part of the driver's public API.)
+ */
+static inline void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+					    enum sq_l3_type l3_type,
+					    u32 network_len)
+{
+	task->pkt_info2 |= SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
+			   SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
+}
+
+/* Record tunnel L4 type (e.g. VXLAN) and tunnel header length in the
+ * task section for encapsulated-packet offloads.
+ */
+static inline void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+					    enum sq_tunnel_l4_type l4_type,
+					    u32 tunnel_len)
+{
+	task->pkt_info2 |= SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
+			   SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
+}
+
+/* Record inner L3 (IP) type and header length in the task section */
+static inline void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+					   enum sq_l3_type l3_type,
+					   u32 network_len)
+{
+	task->pkt_info0 |= SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
+	task->pkt_info1 |= SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
+}
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+ enum sq_l4offload_type l4_offload,
+ u32 l4_len, u32 offset);
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+ enum sq_l4offload_type l4_offload, u32 l4_len,
+ u32 offset, u32 ip_ident, u32 mss);
+
+void hinic_set_vlan_tx_offload(struct hinic_sq_task *task, u32 *queue_info,
+ u16 vlan_tag, u16 vlan_pri);
+
+void hinic_task_set_tx_offload_valid(struct hinic_sq_task *task, u32 l2hdr_len);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index ceec8be2a73b..8f5db7fe8408 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -1,339 +1,505 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
*/
-#include <linux/kernel.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
-#include <linux/prefetch.h>
-#include <linux/cpumask.h>
-#include <linux/if_vlan.h>
-#include <asm/barrier.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_dev.h"
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic_dev.h"
+#include "hinic_qp.h"
#include "hinic_rx.h"
-#include "hinic_dev.h"
-#define RX_IRQ_NO_PENDING 0
-#define RX_IRQ_NO_COALESC 0
-#define RX_IRQ_NO_LLI_TIMER 0
-#define RX_IRQ_NO_CREDIT 0
-#define RX_IRQ_NO_RESEND_TIMER 0
-#define HINIC_RX_BUFFER_WRITE 16
+static void hinic_clear_rss_config_user(struct hinic_nic_dev *nic_dev);
-#define HINIC_RX_IPV6_PKT 7
-#define LRO_PKT_HDR_LEN_IPV4 66
-#define LRO_PKT_HDR_LEN_IPV6 86
-#define LRO_REPLENISH_THLD 256
+#define HINIC_RX_HDR_SIZE 256
+#define HINIC_RX_IPV6_PKT 7
+#define HINIC_RX_VXLAN_PKT 0xb
-#define LRO_PKT_HDR_LEN(cqe) \
- (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
- HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
+/* Increment one rxq statistics counter under the u64_stats seqcount so
+ * readers on 32-bit systems see a consistent 64-bit value
+ */
+#define RXQ_STATS_INC(rxq, field) \
+{ \
+	u64_stats_update_begin(&(rxq)->rxq_stats.syncp); \
+	(rxq)->rxq_stats.field++; \
+	u64_stats_update_end(&(rxq)->rxq_stats.syncp); \
+}
-/**
- * hinic_rxq_clean_stats - Clean the statistics of specific queue
- * @rxq: Logical Rx Queue
- **/
-static void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
+static bool rx_alloc_mapped_page(struct hinic_rxq *rxq,
+ struct hinic_rx_info *rx_info)
{
- struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->pkts = 0;
- rxq_stats->bytes = 0;
- rxq_stats->errors = 0;
- rxq_stats->csum_errors = 0;
- rxq_stats->other_errors = 0;
- u64_stats_update_end(&rxq_stats->syncp);
-}
+ struct page *page = rx_info->page;
+ dma_addr_t dma = rx_info->buf_dma_addr;
-/**
- * hinic_rxq_get_stats - get statistics of Rx Queue
- * @rxq: Logical Rx Queue
- * @stats: return updated stats here
- **/
-void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
-{
- struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
- unsigned int start;
+ if (likely(dma))
+ return true;
- do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- stats->pkts = rxq_stats->pkts;
- stats->bytes = rxq_stats->bytes;
- stats->errors = rxq_stats->csum_errors +
- rxq_stats->other_errors;
- stats->csum_errors = rxq_stats->csum_errors;
- stats->other_errors = rxq_stats->other_errors;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
-}
+ /* alloc new page for storage */
+ page = dev_alloc_pages(nic_dev->page_order);
+ if (unlikely(!page)) {
+ RXQ_STATS_INC(rxq, alloc_rx_buf_err);
+ return false;
+ }
-/**
- * rxq_stats_init - Initialize the statistics of specific queue
- * @rxq: Logical Rx Queue
- **/
-static void rxq_stats_init(struct hinic_rxq *rxq)
-{
- struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
+ /* map page for use */
+ dma = dma_map_page(&pdev->dev, page, 0, rxq->dma_rx_buff_size,
+ DMA_FROM_DEVICE);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (unlikely(dma_mapping_error(&pdev->dev, dma))) {
+ RXQ_STATS_INC(rxq, map_rx_buf_err);
+ __free_pages(page, nic_dev->page_order);
+ return false;
+ }
- u64_stats_init(&rxq_stats->syncp);
- hinic_rxq_clean_stats(rxq);
+ rx_info->page = page;
+ rx_info->buf_dma_addr = dma;
+ rx_info->page_offset = 0;
+
+ return true;
}
-static void rx_csum(struct hinic_rxq *rxq, u32 status,
- struct sk_buff *skb)
+static int hinic_rx_fill_wqe(struct hinic_rxq *rxq)
{
struct net_device *netdev = rxq->netdev;
- u32 csum_err;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_rq_wqe *rq_wqe;
+ struct hinic_rx_info *rx_info;
+ dma_addr_t dma_addr = 0;
+ u16 pi = 0;
+ int rq_wqe_len;
+ int i;
- csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);
+ for (i = 0; i < rxq->q_depth; i++) {
+ rx_info = &rxq->rx_info[i];
- if (!(netdev->features & NETIF_F_RXCSUM))
- return;
+ rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);
+ if (!rq_wqe) {
+ nicif_err(nic_dev, drv, netdev, "Failed to get rq wqe, rxq id: %d, wqe id: %d\n",
+ rxq->q_id, i);
+ break;
+ }
- if (!csum_err) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
- HINIC_RX_CSUM_IPSU_OTHER_ERR)))
- rxq->rxq_stats.csum_errors++;
- skb->ip_summed = CHECKSUM_NONE;
+ hinic_prepare_rq_wqe(rq_wqe, pi, dma_addr, rx_info->cqe_dma);
+
+ rq_wqe_len = sizeof(struct hinic_rq_wqe);
+ hinic_cpu_to_be32(rq_wqe, rq_wqe_len);
+ rx_info->rq_wqe = rq_wqe;
}
+
+ hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, rxq->q_depth);
+
+ return i;
}
-/**
- * rx_alloc_skb - allocate skb and map it to dma address
- * @rxq: rx queue
- * @dma_addr: returned dma address for the skb
- *
- * Return skb
- **/
-static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
- dma_addr_t *dma_addr)
-{
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct sk_buff *skb;
- dma_addr_t addr;
- int err;
+static int hinic_rx_fill_buffers(struct hinic_rxq *rxq)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_rq_wqe *rq_wqe;
+ struct hinic_rx_info *rx_info;
+ dma_addr_t dma_addr;
+ int i;
+ int free_wqebbs = rxq->delta - 1;
- skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
- if (!skb)
- return NULL;
+ for (i = 0; i < free_wqebbs; i++) {
+ rx_info = &rxq->rx_info[rxq->next_to_update];
- addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
- DMA_FROM_DEVICE);
- err = dma_mapping_error(&pdev->dev, addr);
- if (err) {
- dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
- goto err_rx_map;
+ if (unlikely(!rx_alloc_mapped_page(rxq, rx_info)))
+ break;
+
+ dma_addr = rx_info->buf_dma_addr + rx_info->page_offset;
+
+ rq_wqe = rx_info->rq_wqe;
+
+ rq_wqe->buf_desc.addr_high =
+ cpu_to_be32(upper_32_bits(dma_addr));
+ rq_wqe->buf_desc.addr_low =
+ cpu_to_be32(lower_32_bits(dma_addr));
+ rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
}
- *dma_addr = addr;
- return skb;
+ if (likely(i)) {
+ /* Write all the wqes before pi update */
+ wmb();
-err_rx_map:
- dev_kfree_skb_any(skb);
- return NULL;
+ hinic_update_rq_hw_pi(nic_dev->hwdev, rxq->q_id,
+ rxq->next_to_update);
+ rxq->delta -= i;
+ rxq->next_to_alloc = rxq->next_to_update;
+ } else if (free_wqebbs == rxq->q_depth - 1) {
+ RXQ_STATS_INC(rxq, rx_buf_empty);
+ }
+
+ return i;
}
-/**
- * rx_unmap_skb - unmap the dma address of the skb
- * @rxq: rx queue
- * @dma_addr: dma address of the skb
- **/
-static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
+void hinic_rx_free_buffers(struct hinic_rxq *rxq)
{
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
+ u16 i;
+ struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct hinic_rx_info *rx_info;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rxq->q_depth; i++) {
+ rx_info = &rxq->rx_info[i];
+
+ if (rx_info->buf_dma_addr) {
+ dma_unmap_page(rxq->dev, rx_info->buf_dma_addr,
+ rxq->dma_rx_buff_size,
+ DMA_FROM_DEVICE);
+ rx_info->buf_dma_addr = 0;
+ }
- dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
- DMA_FROM_DEVICE);
+ if (rx_info->page) {
+ __free_pages(rx_info->page, nic_dev->page_order);
+ rx_info->page = NULL;
+ }
+ }
}
-/**
- * rx_free_skb - unmap and free skb
- * @rxq: rx queue
- * @skb: skb to free
- * @dma_addr: dma address of the skb
- **/
-static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
- dma_addr_t dma_addr)
+static void hinic_reuse_rx_page(struct hinic_rxq *rxq,
+ struct hinic_rx_info *old_rx_info)
{
- rx_unmap_skb(rxq, dma_addr);
- dev_kfree_skb_any(skb);
+ struct hinic_rx_info *new_rx_info;
+ u16 nta = rxq->next_to_alloc;
+
+ new_rx_info = &rxq->rx_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rxq->next_to_alloc = (nta < rxq->q_depth) ? nta : 0;
+
+ new_rx_info->page = old_rx_info->page;
+ new_rx_info->page_offset = old_rx_info->page_offset;
+ new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr,
+ new_rx_info->page_offset,
+ rxq->buf_len,
+ DMA_FROM_DEVICE);
}
-/**
- * rx_alloc_pkts - allocate pkts in rx queue
- * @rxq: rx queue
- *
- * Return number of skbs allocated
- **/
-static int rx_alloc_pkts(struct hinic_rxq *rxq)
+static bool hinic_add_rx_frag(struct hinic_rxq *rxq,
+ struct hinic_rx_info *rx_info,
+ struct sk_buff *skb, u32 size)
{
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- struct hinic_rq_wqe *rq_wqe;
- unsigned int free_wqebbs;
- struct hinic_sge sge;
- dma_addr_t dma_addr;
- struct sk_buff *skb;
- u16 prod_idx;
- int i;
+ struct page *page;
+ u8 *va;
+
+ page = rx_info->page;
+ va = (u8 *)page_address(page) + rx_info->page_offset;
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ dma_sync_single_range_for_cpu(rxq->dev,
+ rx_info->buf_dma_addr,
+ rx_info->page_offset,
+ rxq->buf_len,
+ DMA_FROM_DEVICE);
+
+ if (size <= HINIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
+ memcpy(__skb_put(skb, size), va,
+ ALIGN(size, sizeof(long))); /*lint !e666*/
+
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(page_to_nid(page) == numa_node_id()))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
- free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ (int)rx_info->page_offset, (int)size, rxq->buf_len);
- /* Limit the allocation chunks */
- if (free_wqebbs > nic_dev->rx_weight)
- free_wqebbs = nic_dev->rx_weight;
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
- for (i = 0; i < free_wqebbs; i++) {
- skb = rx_alloc_skb(rxq, &dma_addr);
- if (!skb)
- goto skb_out;
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
- hinic_set_sge(&sge, dma_addr, skb->len);
+ /* flip page offset to other buffer */
+ rx_info->page_offset ^= rxq->buf_len;
- rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
- &prod_idx);
- if (!rq_wqe) {
- rx_free_skb(rxq, skb, dma_addr);
- goto skb_out;
+ page_ref_inc(page);
+
+ return true;
+}
+
+static void __packaging_skb(struct hinic_rxq *rxq, struct sk_buff *head_skb,
+ u8 sge_num, u32 pkt_len)
+{
+ struct hinic_rx_info *rx_info;
+ struct sk_buff *skb;
+ u8 frag_num = 0;
+ u32 size;
+ u16 sw_ci;
+
+ sw_ci = ((u32)rxq->cons_idx) & rxq->q_mask;
+ skb = head_skb;
+ while (sge_num) {
+ rx_info = &rxq->rx_info[sw_ci];
+ sw_ci = (sw_ci + 1) & rxq->q_mask;
+ if (unlikely(pkt_len > rxq->buf_len)) {
+ size = rxq->buf_len;
+ pkt_len -= rxq->buf_len;
+ } else {
+ size = pkt_len;
}
- hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
+ if (unlikely(frag_num == MAX_SKB_FRAGS)) {
+ frag_num = 0;
+ if (skb == head_skb)
+ skb = skb_shinfo(skb)->frag_list;
+ else
+ skb = skb->next;
+ }
- hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
+ if (unlikely(skb != head_skb)) {
+ head_skb->len += size;
+ head_skb->data_len += size;
+ head_skb->truesize += rxq->buf_len;
+ }
+
+ if (likely(hinic_add_rx_frag(rxq, rx_info, skb, size))) {
+ hinic_reuse_rx_page(rxq, rx_info);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rxq->dev, rx_info->buf_dma_addr,
+ rxq->dma_rx_buff_size, DMA_FROM_DEVICE);
+ }
+ /* clear contents of buffer_info */
+ rx_info->buf_dma_addr = 0;
+ rx_info->page = NULL;
+ sge_num--;
+ frag_num++;
}
+}
-skb_out:
- if (i) {
- wmb(); /* write all the wqes before update PI */
+static struct sk_buff *hinic_fetch_rx_buffer(struct hinic_rxq *rxq, u32 pkt_len)
+{
+ struct sk_buff *head_skb, *cur_skb, *skb = NULL;
+ struct net_device *netdev = rxq->netdev;
+ u8 sge_num, skb_num;
+ u16 wqebb_cnt = 0;
+
+ head_skb = netdev_alloc_skb_ip_align(netdev, HINIC_RX_HDR_SIZE);
+ if (unlikely(!head_skb))
+ return NULL;
+
+ sge_num = (u8)(pkt_len >> rxq->rx_buff_shift) +
+ ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
+ if (likely(sge_num <= MAX_SKB_FRAGS))
+ skb_num = 1;
+ else
+ skb_num = (sge_num / MAX_SKB_FRAGS) +
+ ((sge_num % MAX_SKB_FRAGS) ? 1 : 0);
+
+ while (unlikely(skb_num > 1)) {
+ cur_skb = netdev_alloc_skb_ip_align(netdev, HINIC_RX_HDR_SIZE);
+ if (unlikely(!cur_skb))
+ goto alloc_skb_fail;
+
+ if (!skb) {
+ skb_shinfo(head_skb)->frag_list = cur_skb;
+ skb = cur_skb;
+ } else {
+ skb->next = cur_skb;
+ skb = cur_skb;
+ }
- hinic_rq_update(rxq->rq, prod_idx);
+ skb_num--;
}
- return i;
+ prefetchw(head_skb->data);
+ wqebb_cnt = sge_num;
+
+ __packaging_skb(rxq, head_skb, sge_num, pkt_len);
+
+ rxq->cons_idx += wqebb_cnt;
+ rxq->delta += wqebb_cnt;
+
+ return head_skb;
+
+alloc_skb_fail:
+ dev_kfree_skb_any(head_skb);
+ return NULL;
}
-/**
- * free_all_rx_skbs - free all skbs in rx queue
- * @rxq: rx queue
- **/
-static void free_all_rx_skbs(struct hinic_rxq *rxq)
+void hinic_rxq_get_stats(struct hinic_rxq *rxq,
+ struct hinic_rxq_stats *stats)
{
- struct hinic_rq *rq = rxq->rq;
- struct hinic_hw_wqe *hw_wqe;
- struct hinic_sge sge;
- u16 ci;
-
- while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
- if (IS_ERR(hw_wqe))
- break;
+ struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
+ unsigned int start;
- hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);
+ u64_stats_update_begin(&stats->syncp);
+ do {
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ stats->bytes = rxq_stats->bytes;
+ stats->packets = rxq_stats->packets;
+ stats->errors = rxq_stats->csum_errors +
+ rxq_stats->other_errors;
+ stats->csum_errors = rxq_stats->csum_errors;
+ stats->other_errors = rxq_stats->other_errors;
+ stats->dropped = rxq_stats->dropped;
+ stats->rx_buf_empty = rxq_stats->rx_buf_empty;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ u64_stats_update_end(&stats->syncp);
+}
- hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);
+void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats)
+{
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->bytes = 0;
+ rxq_stats->packets = 0;
+ rxq_stats->errors = 0;
+ rxq_stats->csum_errors = 0;
+ rxq_stats->other_errors = 0;
+ rxq_stats->dropped = 0;
- rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
- }
+ rxq_stats->alloc_skb_err = 0;
+ rxq_stats->alloc_rx_buf_err = 0;
+ rxq_stats->map_rx_buf_err = 0;
+ rxq_stats->rx_buf_empty = 0;
+ u64_stats_update_end(&rxq_stats->syncp);
}
-/**
- * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
- * @rxq: rx queue
- * @head_skb: the first skb in the list
- * @left_pkt_len: left size of the pkt exclude head skb
- * @ci: consumer index
- *
- * Return number of wqes that used for the left of the pkt
- **/
-static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
- unsigned int left_pkt_len, u16 ci)
+static void rxq_stats_init(struct hinic_rxq *rxq)
{
- struct sk_buff *skb, *curr_skb = head_skb;
- struct hinic_rq_wqe *rq_wqe;
- unsigned int curr_len;
- struct hinic_sge sge;
- int num_wqes = 0;
+ struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
- while (left_pkt_len > 0) {
- rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
- &skb, &ci);
+ u64_stats_init(&rxq_stats->syncp);
+ hinic_rxq_clean_stats(rxq_stats);
+}
- num_wqes++;
+static void hinic_pull_tail(struct sk_buff *skb)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lomem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, HINIC_RX_HDR_SIZE);
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, HINIC_RX_HDR_SIZE);
+ frag->bv_offset += HINIC_RX_HDR_SIZE;
+ skb->data_len -= HINIC_RX_HDR_SIZE;
+ skb->tail += HINIC_RX_HDR_SIZE;
+}
- hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
+static void hinic_rx_csum(struct hinic_rxq *rxq, u32 status,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rxq->netdev;
+ u32 csum_err;
- rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
+ csum_err = HINIC_GET_RX_CSUM_ERR(status);
- prefetch(skb->data);
+ if (unlikely(csum_err == HINIC_RX_CSUM_IPSU_OTHER_ERR))
+ rxq->rxq_stats.other_errors++;
- curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
- left_pkt_len;
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
- left_pkt_len -= curr_len;
+ if (!csum_err) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ /* pkt type is recognized by HW, and csum is err */
+ if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
+ HINIC_RX_CSUM_IPSU_OTHER_ERR)))
+ rxq->rxq_stats.csum_errors++;
- __skb_put(skb, curr_len);
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
- if (curr_skb == head_skb)
- skb_shinfo(head_skb)->frag_list = skb;
- else
- curr_skb->next = skb;
+static void hinic_rx_gro(struct hinic_rxq *rxq, u32 offload_type,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rxq->netdev;
+ bool l2_tunnel;
- head_skb->len += skb->len;
- head_skb->data_len += skb->len;
- head_skb->truesize += skb->truesize;
+ if (!(netdev->features & NETIF_F_GRO))
+ return;
- curr_skb = skb;
- }
+ l2_tunnel = HINIC_GET_RX_PKT_TYPE(offload_type) == HINIC_RX_VXLAN_PKT ?
+ 1 : 0;
- return num_wqes;
+ if (l2_tunnel && skb->ip_summed == CHECKSUM_UNNECESSARY)
+ /* If we checked the outer header let the stack know */
+ skb->csum_level = 1;
}
-static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
+static void hinic_copy_lp_data(struct hinic_nic_dev *nic_dev,
struct sk_buff *skb)
{
struct net_device *netdev = nic_dev->netdev;
u8 *lb_buf = nic_dev->lb_test_rx_buf;
+ void *frag_data;
int lb_len = nic_dev->lb_pkt_len;
int pkt_offset, frag_len, i;
- void *frag_data = NULL;
if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
nic_dev->lb_test_rx_idx = 0;
- netif_warn(nic_dev, drv, netdev, "Loopback test warning, receive too more test pkts\n");
+ nicif_warn(nic_dev, rx_err, netdev, "Loopback test warning, recive too more test pkt\n");
}
if (skb->len != nic_dev->lb_pkt_len) {
- netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
+ nicif_warn(nic_dev, rx_err, netdev, "Wrong packet length\n");
nic_dev->lb_test_rx_idx++;
return;
}
pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
frag_len = (int)skb_headlen(skb);
- memcpy(lb_buf + pkt_offset, skb->data, frag_len);
+ memcpy((lb_buf + pkt_offset), skb->data, frag_len);
pkt_offset += frag_len;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
@@ -344,288 +510,602 @@ static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
nic_dev->lb_test_rx_idx++;
}
-/**
- * rxq_recv - Rx handler
- * @rxq: rx queue
- * @budget: maximum pkts to process
- *
- * Return number of pkts received
- **/
-static int rxq_recv(struct hinic_rxq *rxq, int budget)
+int recv_one_pkt(struct hinic_rxq *rxq, struct hinic_rq_cqe *rx_cqe,
+ u32 pkt_len, u32 vlan_len, u32 status)
{
- struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
- struct net_device *netdev = rxq->netdev;
- u64 pkt_len = 0, rx_bytes = 0;
- struct hinic_rq *rq = rxq->rq;
- struct hinic_rq_wqe *rq_wqe;
- struct hinic_dev *nic_dev;
- unsigned int free_wqebbs;
- struct hinic_rq_cqe *cqe;
- int num_wqes, pkts = 0;
- struct hinic_sge sge;
- unsigned int status;
struct sk_buff *skb;
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
u32 offload_type;
- u16 ci, num_lro;
- u16 num_wqe = 0;
- u32 vlan_len;
- u16 vid;
- nic_dev = netdev_priv(netdev);
+ skb = hinic_fetch_rx_buffer(rxq, pkt_len);
+ if (unlikely(!skb)) {
+ RXQ_STATS_INC(rxq, alloc_skb_err);
+ return -ENOMEM;
+ }
- while (pkts < budget) {
- num_wqes = 0;
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ hinic_pull_tail(skb);
- rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
- &ci);
- if (!rq_wqe)
- break;
+ hinic_rx_csum(rxq, status, skb);
- /* make sure we read rx_done before packet length */
- dma_rmb();
+ offload_type = be32_to_cpu(rx_cqe->offload_type);
+ hinic_rx_gro(rxq, offload_type, skb);
- cqe = rq->cqe[ci];
- status = be32_to_cpu(cqe->status);
- hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
+ u16 vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
- rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
+ /* if the packet is a vlan pkt, the vid may be 0 */
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
- rx_csum(rxq, status, skb);
+ if (unlikely(test_bit(HINIC_LP_TEST, &nic_dev->flags)))
+ hinic_copy_lp_data(nic_dev, skb);
- prefetch(skb->data);
+ skb_record_rx_queue(skb, rxq->q_id);
+ skb->protocol = eth_type_trans(skb, netdev);
- pkt_len = sge.len;
+ if (skb_has_frag_list(skb)) {
+ napi_gro_flush(&rxq->irq_cfg->napi, false);
+ netif_receive_skb(skb);
+ } else {
+ napi_gro_receive(&rxq->irq_cfg->napi, skb);
+ }
- if (pkt_len <= HINIC_RX_BUF_SZ) {
- __skb_put(skb, pkt_len);
- } else {
- __skb_put(skb, HINIC_RX_BUF_SZ);
- num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
- HINIC_RX_BUF_SZ, ci);
- }
+ return 0;
+}
+
+void rx_pass_super_cqe(struct hinic_rxq *rxq, u32 index, u32 pkt_num,
+ struct hinic_rq_cqe *cqe)
+{
+ u8 sge_num = 0;
+ u32 pkt_len;
+
+ while (index < pkt_num) {
+ pkt_len = hinic_get_pkt_len_for_super_cqe
+ (cqe, index == (pkt_num - 1));
+ sge_num += (u8)(pkt_len >> rxq->rx_buff_shift) +
+ ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
+ index++;
+ }
- hinic_rq_put_wqe(rq, ci,
- (num_wqes + 1) * HINIC_RQ_WQE_SIZE);
+ rxq->cons_idx += sge_num;
+ rxq->delta += sge_num;
+}
- offload_type = be32_to_cpu(cqe->offload_type);
- vlan_len = be32_to_cpu(cqe->len);
- if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
- vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+static inline int __recv_supper_cqe(struct hinic_rxq *rxq,
+ struct hinic_rq_cqe *rx_cqe, u32 pkt_info,
+ u32 vlan_len, u32 status, int *pkts,
+ u64 *rx_bytes, u32 *dropped)
+{
+ u32 pkt_len;
+ int i, pkt_num = 0;
+
+ pkt_num = HINIC_GET_RQ_CQE_PKT_NUM(pkt_info);
+ i = 0;
+ while (i < pkt_num) {
+ pkt_len = ((i == (pkt_num - 1)) ?
+ RQ_CQE_PKT_LEN_GET(pkt_info, LAST_LEN) :
+ RQ_CQE_PKT_LEN_GET(pkt_info, FIRST_LEN));
+ if (unlikely(recv_one_pkt(rxq, rx_cqe, pkt_len,
+ vlan_len, status))) {
+ if (i) {
+ rx_pass_super_cqe(rxq, i,
+ pkt_num,
+ rx_cqe);
+ *dropped += (pkt_num - i);
+ }
+ break;
}
- if (unlikely(nic_dev->flags & HINIC_LP_TEST))
- hinic_copy_lp_data(nic_dev, skb);
+ *rx_bytes += pkt_len;
+ (*pkts)++;
+ i++;
+ }
- skb_record_rx_queue(skb, qp->q_id);
- skb->protocol = eth_type_trans(skb, rxq->netdev);
+ if (!i)
+ return -EFAULT;
- napi_gro_receive(&rxq->napi, skb);
+ return 0;
+}
- pkts++;
- rx_bytes += pkt_len;
+#define LRO_PKT_HDR_LEN_IPV4 66
+#define LRO_PKT_HDR_LEN_IPV6 86
+#define LRO_PKT_HDR_LEN(cqe) \
+ (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
+ HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
- num_lro = HINIC_GET_RX_NUM_LRO(status);
- if (num_lro) {
- rx_bytes += ((num_lro - 1) *
- LRO_PKT_HDR_LEN(cqe));
+int hinic_rx_poll(struct hinic_rxq *rxq, int budget)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u32 status, pkt_len, vlan_len, pkt_info, dropped = 0;
+ struct hinic_rq_cqe *rx_cqe;
+ u64 rx_bytes = 0;
+ u16 sw_ci, num_lro;
+ int pkts = 0, nr_pkts = 0;
+ u16 num_wqe = 0;
- num_wqe +=
- (u16)(pkt_len >> rxq->rx_buff_shift) +
- ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
+ while (likely(pkts < budget)) {
+ sw_ci = ((u32)rxq->cons_idx) & rxq->q_mask;
+ rx_cqe = rxq->rx_info[sw_ci].cqe;
+ status = be32_to_cpu(rx_cqe->status);
+
+ if (!HINIC_GET_RX_DONE(status))
+ break;
+
+ /* make sure we read rx_done before packet length */
+ rmb();
+
+ vlan_len = be32_to_cpu(rx_cqe->vlan_len);
+ pkt_info = be32_to_cpu(rx_cqe->pkt_info);
+ pkt_len = HINIC_GET_RX_PKT_LEN(vlan_len);
+
+ if (unlikely(HINIC_GET_SUPER_CQE_EN(pkt_info))) {
+ if (unlikely(__recv_supper_cqe(rxq, rx_cqe, pkt_info,
+ vlan_len, status, &pkts,
+ &rx_bytes, &dropped)))
+ break;
+ nr_pkts += (int)HINIC_GET_RQ_CQE_PKT_NUM(pkt_info);
+ } else {
+ if (recv_one_pkt(rxq, rx_cqe, pkt_len,
+ vlan_len, status))
+ break;
+ rx_bytes += pkt_len;
+ pkts++;
+ nr_pkts++;
+
+ num_lro = HINIC_GET_RX_NUM_LRO(status);
+ if (num_lro) {
+ rx_bytes += ((num_lro - 1) *
+ LRO_PKT_HDR_LEN(rx_cqe));
+
+ num_wqe +=
+ (u16)(pkt_len >> rxq->rx_buff_shift) +
+ ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
+ }
}
- cqe->status = 0;
+ rx_cqe->status = 0;
- if (num_wqe >= LRO_REPLENISH_THLD)
+ if (num_wqe >= nic_dev->lro_replenish_thld)
break;
}
- free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
- if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
- rx_alloc_pkts(rxq);
+ if (rxq->delta >= HINIC_RX_BUFFER_WRITE)
+ hinic_rx_fill_buffers(rxq);
u64_stats_update_begin(&rxq->rxq_stats.syncp);
- rxq->rxq_stats.pkts += pkts;
+ rxq->rxq_stats.packets += nr_pkts;
rxq->rxq_stats.bytes += rx_bytes;
+ rxq->rxq_stats.dropped += dropped;
u64_stats_update_end(&rxq->rxq_stats.syncp);
-
return pkts;
}
-static int rx_poll(struct napi_struct *napi, int budget)
+static int rx_alloc_cqe(struct hinic_rxq *rxq)
{
- struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- struct hinic_rq *rq = rxq->rq;
- int pkts;
+ struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic_rx_info *rx_info;
+ struct hinic_rq_cqe *cqe_va;
+ dma_addr_t cqe_pa;
+ u32 cqe_mem_size;
+ int idx;
+
+ cqe_mem_size = sizeof(*rx_info->cqe) * rxq->q_depth;
+ rxq->cqe_start_vaddr = dma_alloc_coherent(&pdev->dev, cqe_mem_size,
+ &rxq->cqe_start_paddr,
+ GFP_KERNEL);
+ if (!rxq->cqe_start_vaddr) {
+ nicif_err(nic_dev, drv, rxq->netdev, "Failed to allocate cqe dma\n");
+ return -ENOMEM;
+ }
- pkts = rxq_recv(rxq, budget);
- if (pkts >= budget)
- return budget;
+ cqe_va = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr;
+ cqe_pa = rxq->cqe_start_paddr;
- napi_complete(napi);
+ for (idx = 0; idx < rxq->q_depth; idx++) {
+ rx_info = &rxq->rx_info[idx];
+ rx_info->cqe = cqe_va;
+ rx_info->cqe_dma = cqe_pa;
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- rq->msix_entry,
- HINIC_MSIX_ENABLE);
+ cqe_va++;
+ cqe_pa += sizeof(*rx_info->cqe);
+ }
- return pkts;
+ hinic_rq_cqe_addr_set(nic_dev->hwdev, rxq->q_id, rxq->cqe_start_paddr);
+ return 0;
}
-static void rx_add_napi(struct hinic_rxq *rxq)
+static void rx_free_cqe(struct hinic_rxq *rxq)
{
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ u32 cqe_mem_size;
+
+ cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
+
+ dma_free_coherent(&pdev->dev, cqe_mem_size,
+ rxq->cqe_start_vaddr, rxq->cqe_start_paddr);
+}
+
+static int hinic_setup_rx_resources(struct hinic_rxq *rxq,
+ struct net_device *netdev,
+ struct irq_info *entry)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u64 rx_info_sz;
+ int err, pkts;
+
+ rxq->irq_id = entry->irq_id;
+ rxq->msix_entry_idx = entry->msix_entry_idx;
+ rxq->next_to_alloc = 0;
+ rxq->next_to_update = 0;
+ rxq->delta = rxq->q_depth;
+ rxq->q_mask = rxq->q_depth - 1;
+ rxq->cons_idx = 0;
+
+ rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info);
+ if (!rx_info_sz) {
+ nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size rx info\n");
+ return -EINVAL;
+ }
+
+ rxq->rx_info = kzalloc(rx_info_sz, GFP_KERNEL);
+ if (!rxq->rx_info)
+ return -ENOMEM;
+
+ err = rx_alloc_cqe(rxq);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to allocate Rx cqe\n");
+ goto rx_cqe_err;
+ }
+
+ pkts = hinic_rx_fill_wqe(rxq);
+ if (pkts != rxq->q_depth) {
+ nicif_err(nic_dev, drv, netdev, "Failed to fill rx wqe\n");
+ err = -ENOMEM;
+ goto rx_pkts_err;
+ }
+ pkts = hinic_rx_fill_buffers(rxq);
+ if (!pkts) {
+ nicif_err(nic_dev, drv, netdev, "Failed to allocate Rx buffer\n");
+ err = -ENOMEM;
+ goto rx_pkts_err;
+ }
+
+ return 0;
+
+rx_pkts_err:
+ rx_free_cqe(rxq);
+
+rx_cqe_err:
+ kfree(rxq->rx_info);
- netif_napi_add_weight(rxq->netdev, &rxq->napi, rx_poll,
- nic_dev->rx_weight);
- napi_enable(&rxq->napi);
+ return err;
}
-static void rx_del_napi(struct hinic_rxq *rxq)
+static void hinic_free_rx_resources(struct hinic_rxq *rxq)
{
- napi_disable(&rxq->napi);
- netif_napi_del(&rxq->napi);
+ hinic_rx_free_buffers(rxq);
+ rx_free_cqe(rxq);
+ kfree(rxq->rx_info);
}
-static irqreturn_t rx_irq(int irq, void *data)
+int hinic_setup_all_rx_resources(struct net_device *netdev,
+ struct irq_info *msix_entries)
{
- struct hinic_rxq *rxq = (struct hinic_rxq *)data;
- struct hinic_rq *rq = rxq->rq;
- struct hinic_dev *nic_dev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i, q_id;
+ int err;
+
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ err = hinic_setup_rx_resources(&nic_dev->rxqs[q_id],
+ nic_dev->netdev,
+ &msix_entries[q_id]);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to set up rxq resource\n");
+ goto init_rxq_err;
+ }
+ }
- /* Disable the interrupt until napi will be completed */
- nic_dev = netdev_priv(rxq->netdev);
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- rq->msix_entry,
- HINIC_MSIX_DISABLE);
+ return 0;
- nic_dev = netdev_priv(rxq->netdev);
- hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
+init_rxq_err:
+ for (i = 0; i < q_id; i++)
+ hinic_free_rx_resources(&nic_dev->rxqs[i]);
- napi_schedule(&rxq->napi);
- return IRQ_HANDLED;
+ return err;
}
-static int rx_request_irq(struct hinic_rxq *rxq)
+void hinic_free_all_rx_resources(struct net_device *netdev)
{
- struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- struct hinic_msix_config interrupt_info = {0};
- struct hinic_intr_coal_info *intr_coal = NULL;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_rq *rq = rxq->rq;
- struct hinic_qp *qp;
- int err;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 q_id;
+
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++)
+ hinic_free_rx_resources(&nic_dev->rxqs[q_id]);
+}
- qp = container_of(rq, struct hinic_qp, rq);
+int hinic_alloc_rxqs(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic_rxq *rxq;
+ u16 num_rxqs = nic_dev->max_qps;
+ u16 q_id;
+ u64 rxq_size;
+
+ rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
+ if (!rxq_size) {
+ nic_err(&pdev->dev, "Cannot allocate zero size rxqs\n");
+ return -EINVAL;
+ }
- rx_add_napi(rxq);
+ nic_dev->rxqs = kzalloc(rxq_size, GFP_KERNEL);
+ if (!nic_dev->rxqs)
+ return -ENOMEM;
- hinic_hwdev_msix_set(hwdev, rq->msix_entry,
- RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
- RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
- RX_IRQ_NO_RESEND_TIMER);
+ for (q_id = 0; q_id < num_rxqs; q_id++) {
+ rxq = &nic_dev->rxqs[q_id];
+ rxq->netdev = netdev;
+ rxq->dev = &pdev->dev;
+ rxq->q_id = q_id;
+ rxq->buf_len = nic_dev->rx_buff_len;
+ rxq->rx_buff_shift = ilog2(nic_dev->rx_buff_len);
+ rxq->dma_rx_buff_size = RX_BUFF_NUM_PER_PAGE *
+ nic_dev->rx_buff_len;
+ rxq->q_depth = nic_dev->rq_depth;
+ rxq->q_mask = nic_dev->rq_depth - 1;
+
+ rxq_stats_init(rxq);
+ }
- intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
- interrupt_info.msix_index = rq->msix_entry;
- interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
- interrupt_info.pending_cnt = intr_coal->pending_limt;
- interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
+ return 0;
+}
- err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
- if (err) {
- netif_err(nic_dev, drv, rxq->netdev,
- "Failed to set RX interrupt coalescing attribute\n");
- goto err_req_irq;
+void hinic_free_rxqs(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic_clear_rss_config_user(nic_dev);
+ kfree(nic_dev->rxqs);
+}
+
+void hinic_init_rss_parameters(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
+
+ nic_dev->rss_type.tcp_ipv6_ext = 1;
+ nic_dev->rss_type.ipv6_ext = 1;
+ nic_dev->rss_type.tcp_ipv6 = 1;
+ nic_dev->rss_type.ipv6 = 1;
+ nic_dev->rss_type.tcp_ipv4 = 1;
+ nic_dev->rss_type.ipv4 = 1;
+ nic_dev->rss_type.udp_ipv6 = 1;
+ nic_dev->rss_type.udp_ipv4 = 1;
+}
+
+void hinic_set_default_rss_indir(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (!nic_dev->rss_indir_user)
+ return;
+
+ nicif_info(nic_dev, drv, netdev,
+ "Discard user configured Rx flow hash indirection\n");
+
+ kfree(nic_dev->rss_indir_user);
+ nic_dev->rss_indir_user = NULL;
+}
+
+static void hinic_maybe_reconfig_rss_indir(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int i;
+
+ if (!nic_dev->rss_indir_user)
+ return;
+
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
+ goto discard_user_rss_indir;
+
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
+ if (nic_dev->rss_indir_user[i] >= nic_dev->num_qps)
+ goto discard_user_rss_indir;
+ }
+
+ return;
+
+discard_user_rss_indir:
+ hinic_set_default_rss_indir(netdev);
+}
+
+static void hinic_clear_rss_config_user(struct hinic_nic_dev *nic_dev)
+{
+ kfree(nic_dev->rss_hkey_user);
+
+ nic_dev->rss_hkey_user_be = NULL;
+ nic_dev->rss_hkey_user = NULL;
+
+ kfree(nic_dev->rss_indir_user);
+ nic_dev->rss_indir_user = NULL;
+}
+
+static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev,
+ u8 num_tcs, u32 *indir)
+{
+ u16 num_rss, tc_group_size;
+ int i;
+
+ if (num_tcs)
+ tc_group_size = HINIC_RSS_INDIR_SIZE / num_tcs;
+ else
+ tc_group_size = HINIC_RSS_INDIR_SIZE;
+
+ num_rss = nic_dev->num_rss;
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+ indir[i] = (i / tc_group_size) * num_rss + i % num_rss;
+}
+
+static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev)
+{
+ u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
+
+ hinic_rss_cfg(nic_dev->hwdev, 0, nic_dev->rss_tmpl_idx, 0, prio_tc);
+}
+
+int hinic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc,
+ u8 *prio_tc)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 tmpl_idx = 0xFF;
+ u8 default_rss_key[HINIC_RSS_KEY_SIZE] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa};
+ u32 *indir_tbl;
+ u8 *hkey;
+ int err;
+
+ tmpl_idx = nic_dev->rss_tmpl_idx;
+
+ /* RSS key */
+ if (nic_dev->rss_hkey_user)
+ hkey = nic_dev->rss_hkey_user;
+ else
+ hkey = default_rss_key;
+ err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hkey);
+ if (err)
+ return err;
+
+ hinic_maybe_reconfig_rss_indir(netdev);
+ indir_tbl = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL);
+ if (!indir_tbl) {
+ nicif_err(nic_dev, drv, netdev, "Failed to allocate set hw rss indir_tbl\n");
+ return -ENOMEM;
}
- err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
+ if (nic_dev->rss_indir_user)
+ memcpy(indir_tbl, nic_dev->rss_indir_user,
+ sizeof(u32) * HINIC_RSS_INDIR_SIZE);
+ else
+ hinic_fillout_indir_tbl(nic_dev, num_tc, indir_tbl);
+
+ err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indir_tbl);
+ if (err)
+ goto out;
+
+ err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, nic_dev->rss_type);
+ if (err)
+ goto out;
+
+ err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx,
+ nic_dev->rss_hash_engine);
if (err)
- goto err_req_irq;
+ goto out;
- cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
- err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
+ err = hinic_rss_cfg(nic_dev->hwdev, rss_en, tmpl_idx, num_tc, prio_tc);
if (err)
- goto err_irq_affinity;
+ goto out;
+ kfree(indir_tbl);
return 0;
-err_irq_affinity:
- free_irq(rq->irq, rxq);
-err_req_irq:
- rx_del_napi(rxq);
+out:
+ kfree(indir_tbl);
return err;
}
-static void rx_free_irq(struct hinic_rxq *rxq)
+static int hinic_rss_init(struct hinic_nic_dev *nic_dev)
{
- struct hinic_rq *rq = rxq->rq;
+ struct net_device *netdev = nic_dev->netdev;
+ u32 *indir_tbl;
+ u8 cos, num_tc = 0;
+ u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
+ int err;
+
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) {
+ num_tc = nic_dev->max_cos;
+ for (cos = 0; cos < HINIC_DCB_COS_MAX; cos++) {
+ if (cos < HINIC_DCB_COS_MAX - nic_dev->max_cos)
+ prio_tc[cos] = nic_dev->max_cos - 1;
+ else
+ prio_tc[cos] = (HINIC_DCB_COS_MAX - 1) - cos;
+ }
+ } else {
+ num_tc = 0;
+ }
+
+ indir_tbl = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL);
+ if (!indir_tbl)
+ return -ENOMEM;
- irq_update_affinity_hint(rq->irq, NULL);
- free_irq(rq->irq, rxq);
- rx_del_napi(rxq);
+ if (nic_dev->rss_indir_user)
+ memcpy(indir_tbl, nic_dev->rss_indir_user,
+ sizeof(u32) * HINIC_RSS_INDIR_SIZE);
+ else
+ hinic_fillout_indir_tbl(nic_dev, num_tc, indir_tbl);
+ err = hinic_set_hw_rss_parameters(netdev, 1, num_tc, prio_tc);
+ if (err) {
+ kfree(indir_tbl);
+ return err;
+ }
+
+ kfree(indir_tbl);
+ return 0;
}
-/**
- * hinic_init_rxq - Initialize the Rx Queue
- * @rxq: Logical Rx Queue
- * @rq: Hardware Rx Queue to connect the Logical queue with
- * @netdev: network device to connect the Logical queue with
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
- struct net_device *netdev)
+int hinic_update_hw_tc_map(struct net_device *netdev, u8 num_tc, u8 *prio_tc)
{
- struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
- int err, pkts;
-
- rxq->netdev = netdev;
- rxq->rq = rq;
- rxq->buf_len = HINIC_RX_BUF_SZ;
- rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u8 tmpl_idx = nic_dev->rss_tmpl_idx;
- rxq_stats_init(rxq);
+ /* RSS must be enable when dcb is enabled */
+ return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc);
+}
- rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
- "%s_rxq%d", netdev->name, qp->q_id);
- if (!rxq->irq_name)
- return -ENOMEM;
+int hinic_rx_configure(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
- pkts = rx_alloc_pkts(rxq);
- if (!pkts) {
- err = -ENOMEM;
- goto err_rx_pkts;
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) {
+ err = hinic_rss_init(nic_dev);
+ if (err) {
+ nicif_err(nic_dev, drv, netdev, "Failed to init rss\n");
+ return -EFAULT;
+ }
}
- err = rx_request_irq(rxq);
+ err = hinic_dcb_set_rq_iq_mapping(nic_dev->hwdev,
+ hinic_func_max_qnum(nic_dev->hwdev),
+ NULL);
if (err) {
- netdev_err(netdev, "Failed to request Rx irq\n");
- goto err_req_rx_irq;
+ nicif_err(nic_dev, drv, netdev, "Failed to set rq_iq mapping\n");
+ goto set_rq_cos_mapping_err;
}
return 0;
-err_req_rx_irq:
-err_rx_pkts:
- free_all_rx_skbs(rxq);
- devm_kfree(&netdev->dev, rxq->irq_name);
+set_rq_cos_mapping_err:
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
+ hinic_rss_deinit(nic_dev);
+
return err;
}
-/**
- * hinic_clean_rxq - Clean the Rx Queue
- * @rxq: Logical Rx Queue
- **/
-void hinic_clean_rxq(struct hinic_rxq *rxq)
+void hinic_rx_remove_configure(struct net_device *netdev)
{
- struct net_device *netdev = rxq->netdev;
-
- rx_free_irq(rxq);
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
- free_all_rx_skbs(rxq);
- devm_kfree(&netdev->dev, rxq->irq_name);
+ if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags))
+ hinic_rss_deinit(nic_dev);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 8f7bd6a049bd..901d02045b49 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -1,51 +1,123 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
*/
#ifndef HINIC_RX_H
#define HINIC_RX_H
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/interrupt.h>
-
-#include "hinic_hw_qp.h"
-
-#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF
+/* rx cqe checksum err */
+#define HINIC_RX_CSUM_IP_CSUM_ERR BIT(0)
+#define HINIC_RX_CSUM_TCP_CSUM_ERR BIT(1)
+#define HINIC_RX_CSUM_UDP_CSUM_ERR BIT(2)
+#define HINIC_RX_CSUM_IGMP_CSUM_ERR BIT(3)
+#define HINIC_RX_CSUM_ICMPV4_CSUM_ERR BIT(4)
+#define HINIC_RX_CSUM_ICMPV6_CSUM_ERR BIT(5)
+#define HINIC_RX_CSUM_SCTP_CRC_ERR BIT(6)
#define HINIC_RX_CSUM_HW_CHECK_NONE BIT(7)
#define HINIC_RX_CSUM_IPSU_OTHER_ERR BIT(8)
+#define HINIC_SUPPORT_LRO_ADAP_QPS_MAX 16
+#define HINIC_RX_BUFFER_WRITE 16
+
struct hinic_rxq_stats {
- u64 pkts;
- u64 bytes;
- u64 errors;
- u64 csum_errors;
- u64 other_errors;
- u64 alloc_skb_err;
- struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+ u64 errors;
+ u64 csum_errors;
+ u64 other_errors;
+ u64 dropped;
+ u64 rx_buf_empty;
+
+ u64 alloc_skb_err;
+ u64 alloc_rx_buf_err;
+ u64 map_rx_buf_err;
+
+ struct u64_stats_sync syncp;
+};
+
+struct hinic_rx_info {
+ dma_addr_t buf_dma_addr;
+
+ struct hinic_rq_cqe *cqe;
+ dma_addr_t cqe_dma;
+ struct page *page;
+ u32 page_offset;
+ struct hinic_rq_wqe *rq_wqe;
};
struct hinic_rxq {
- struct net_device *netdev;
- struct hinic_rq *rq;
+ struct net_device *netdev;
- struct hinic_rxq_stats rxq_stats;
+ u16 q_id;
+ u16 q_depth;
+ u16 q_mask;
- char *irq_name;
u16 buf_len;
u32 rx_buff_shift;
+ u32 dma_rx_buff_size;
+
+ struct hinic_rxq_stats rxq_stats;
+ u16 cons_idx;
+ u16 delta;
+
+ u32 irq_id;
+ u16 msix_entry_idx;
+
+ struct hinic_rx_info *rx_info;
+
+ struct hinic_irq *irq_cfg;
+ u16 next_to_alloc;
+ u16 next_to_update;
+ struct device *dev; /* device for DMA mapping */
+
+ unsigned long status;
+ dma_addr_t cqe_start_paddr;
+ void *cqe_start_vaddr;
+ u64 last_moder_packets;
+ u64 last_moder_bytes;
+ u8 last_coalesc_timer_cfg;
+ u8 last_pending_limt;
- struct napi_struct napi;
};
-void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
+void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats);
+
+void hinic_rxq_get_stats(struct hinic_rxq *rxq,
+ struct hinic_rxq_stats *stats);
+
+int hinic_alloc_rxqs(struct net_device *netdev);
+
+void hinic_free_rxqs(struct net_device *netdev);
+
+void hinic_init_rss_parameters(struct net_device *netdev);
+
+void hinic_set_default_rss_indir(struct net_device *netdev);
+
+int hinic_setup_all_rx_resources(struct net_device *netdev,
+ struct irq_info *msix_entries);
+
+void hinic_free_all_rx_resources(struct net_device *netdev);
+
+void hinic_rx_remove_configure(struct net_device *netdev);
+
+int hinic_rx_configure(struct net_device *netdev);
+
+int hinic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc,
+ u8 *prio_tc);
-int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
- struct net_device *netdev);
+int hinic_update_hw_tc_map(struct net_device *netdev, u8 num_tc, u8 *prio_tc);
-void hinic_clean_rxq(struct hinic_rxq *rxq);
+int hinic_rx_poll(struct hinic_rxq *rxq, int budget);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h b/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h
new file mode 100644
index 000000000000..70aec3d6fbfa
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __CHIPIF_SM_LT_H__
+#define __CHIPIF_SM_LT_H__
+
+#define SM_LT_LOAD 0x12
+#define SM_LT_STORE 0x14
+
+#define SM_LT_NUM_OFFSET 13
+#define SM_LT_ABUF_FLG_OFFSET 12
+#define SM_LT_BC_OFFSET 11
+
+#define SM_LT_ENTRY_16B 16
+#define SM_LT_ENTRY_32B 32
+#define SM_LT_ENTRY_48B 48
+#define SM_LT_ENTRY_64B 64
+
+#define TBL_LT_OFFSET_DEFAULT 0
+
+#define SM_CACHE_LINE_SHFT 4 /* log2(16) */
+#define SM_CACHE_LINE_SIZE 16 /* the size of cache line */
+
+#define MAX_SM_LT_READ_LINE_NUM 4
+#define MAX_SM_LT_WRITE_LINE_NUM 3
+
+#define SM_LT_FULL_BYTEENB 0xFFFF
+
+#define TBL_GET_ENB3_MASK(bitmask) ((u16)(((bitmask) >> 32) & 0xFFFF))
+#define TBL_GET_ENB2_MASK(bitmask) ((u16)(((bitmask) >> 16) & 0xFFFF))
+#define TBL_GET_ENB1_MASK(bitmask) ((u16)((bitmask) & 0xFFFF))
+
+enum {
+ SM_LT_NUM_0 = 0, /* lt num = 0, load/store 16B */
+ SM_LT_NUM_1, /* lt num = 1, load/store 32B */
+ SM_LT_NUM_2, /* lt num = 2, load/store 48B */
+ SM_LT_NUM_3 /* lt num = 3, load 64B */
+};
+
+/* lt load request */
+union sml_lt_req_head {
+ struct {
+ u32 offset : 8;
+ u32 pad : 3;
+ u32 bc : 1;
+ u32 abuf_flg : 1;
+ u32 num : 2;
+ u32 ack : 1;
+ u32 op_id : 5;
+ u32 instance : 6;
+ u32 src : 5;
+ } bs;
+
+ u32 value;
+};
+
+struct sml_lt_load_req {
+ u32 extra;
+ union sml_lt_req_head head;
+ u32 index;
+ u32 pad0;
+ u32 pad1;
+};
+
+struct sml_lt_store_req {
+ u32 extra;
+ union sml_lt_req_head head;
+ u32 index;
+ u32 byte_enb[2];
+ u8 write_data[48];
+};
+
+enum {
+ SM_LT_OFFSET_1 = 1,
+ SM_LT_OFFSET_2,
+ SM_LT_OFFSET_3,
+ SM_LT_OFFSET_4,
+ SM_LT_OFFSET_5,
+ SM_LT_OFFSET_6,
+ SM_LT_OFFSET_7,
+ SM_LT_OFFSET_8,
+ SM_LT_OFFSET_9,
+ SM_LT_OFFSET_10,
+ SM_LT_OFFSET_11,
+ SM_LT_OFFSET_12,
+ SM_LT_OFFSET_13,
+ SM_LT_OFFSET_14,
+ SM_LT_OFFSET_15
+};
+
+static inline void sml_lt_store_memcpy(u32 *dst, u32 *src, u8 num)
+{
+ switch (num) {
+ case SM_LT_NUM_2:
+ *(dst + SM_LT_OFFSET_11) = *(src + SM_LT_OFFSET_11);
+ *(dst + SM_LT_OFFSET_10) = *(src + SM_LT_OFFSET_10);
+ *(dst + SM_LT_OFFSET_9) = *(src + SM_LT_OFFSET_9);
+ *(dst + SM_LT_OFFSET_8) = *(src + SM_LT_OFFSET_8);
+ fallthrough;
+ case SM_LT_NUM_1:
+ *(dst + SM_LT_OFFSET_7) = *(src + SM_LT_OFFSET_7);
+ *(dst + SM_LT_OFFSET_6) = *(src + SM_LT_OFFSET_6);
+ *(dst + SM_LT_OFFSET_5) = *(src + SM_LT_OFFSET_5);
+ *(dst + SM_LT_OFFSET_4) = *(src + SM_LT_OFFSET_4);
+ fallthrough;
+ case SM_LT_NUM_0:
+ *(dst + SM_LT_OFFSET_3) = *(src + SM_LT_OFFSET_3);
+ *(dst + SM_LT_OFFSET_2) = *(src + SM_LT_OFFSET_2);
+ *(dst + SM_LT_OFFSET_1) = *(src + SM_LT_OFFSET_1);
+ *dst = *src;
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void sml_lt_load_memcpy(u32 *dst, u32 *src, u8 num)
+{
+ switch (num) {
+ case SM_LT_NUM_3:
+ *(dst + SM_LT_OFFSET_15) = *(src + SM_LT_OFFSET_15);
+ *(dst + SM_LT_OFFSET_14) = *(src + SM_LT_OFFSET_14);
+ *(dst + SM_LT_OFFSET_13) = *(src + SM_LT_OFFSET_13);
+ *(dst + SM_LT_OFFSET_12) = *(src + SM_LT_OFFSET_12);
+ fallthrough;
+ case SM_LT_NUM_2:
+ *(dst + SM_LT_OFFSET_11) = *(src + SM_LT_OFFSET_11);
+ *(dst + SM_LT_OFFSET_10) = *(src + SM_LT_OFFSET_10);
+ *(dst + SM_LT_OFFSET_9) = *(src + SM_LT_OFFSET_9);
+ *(dst + SM_LT_OFFSET_8) = *(src + SM_LT_OFFSET_8);
+ fallthrough;
+ case SM_LT_NUM_1:
+ *(dst + SM_LT_OFFSET_7) = *(src + SM_LT_OFFSET_7);
+ *(dst + SM_LT_OFFSET_6) = *(src + SM_LT_OFFSET_6);
+ *(dst + SM_LT_OFFSET_5) = *(src + SM_LT_OFFSET_5);
+ *(dst + SM_LT_OFFSET_4) = *(src + SM_LT_OFFSET_4);
+ fallthrough;
+ case SM_LT_NUM_0:
+ *(dst + SM_LT_OFFSET_3) = *(src + SM_LT_OFFSET_3);
+ *(dst + SM_LT_OFFSET_2) = *(src + SM_LT_OFFSET_2);
+ *(dst + SM_LT_OFFSET_1) = *(src + SM_LT_OFFSET_1);
+ *dst = *src;
+ break;
+ default:
+ break;
+ }
+}
+
+enum HINIC_CSR_API_DATA_OPERATION_ID {
+ HINIC_CSR_OPERATION_WRITE_CSR = 0x1E,
+ HINIC_CSR_OPERATION_READ_CSR = 0x1F
+};
+
+enum HINIC_CSR_API_DATA_NEED_RESPONSE_DATA {
+ HINIC_CSR_NO_RESP_DATA = 0,
+ HINIC_CSR_NEED_RESP_DATA = 1
+};
+
+enum HINIC_CSR_API_DATA_DATA_SIZE {
+ HINIC_CSR_DATA_SZ_32 = 0,
+ HINIC_CSR_DATA_SZ_64 = 1
+};
+
+struct hinic_csr_request_api_data {
+ u32 dw0;
+
+ union {
+ struct {
+ u32 reserved1 : 13;
+ /* this field indicates the write/read data size:
+ * 2'b00: 32 bits
+ * 2'b01: 64 bits
+ * 2'b10~2'b11:reserved
+ */
+ u32 data_size : 2;
+ /* this field indicates whether the requestor expects to
+ * receive response data.
+ * 1'b0: does not expect to receive response data.
+ * 1'b1: expects to receive response data.
+ */
+ u32 need_response : 1;
+ /* this field indicates the operation that the requestor
+ * expects.
+ * 5'b1_1110: write value to csr space.
+ * 5'b1_1111: read register from csr space.
+ */
+ u32 operation_id : 5;
+ u32 reserved2 : 6;
+ /* this field specifies the Src node ID for this API
+ * request message.
+ */
+ u32 src_node_id : 5;
+ } bits;
+
+ u32 val32;
+ } dw1;
+
+ union {
+ struct {
+ /* it specifies the CSR address. */
+ u32 csr_addr : 26;
+ u32 reserved3 : 6;
+ } bits;
+
+ u32 val32;
+ } dw2;
+
+ /* if data_size=2'b01, this is the high 32 bits of the write data;
+ * otherwise it is 32'hFFFF_FFFF.
+ */
+ u32 csr_write_data_h;
+ /* the low 32 bits of write data. */
+ u32 csr_write_data_l;
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
new file mode 100644
index 000000000000..8c205e96454f
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_sml_counter.h"
+
+/* convert @len consecutive 32-bit words at @node to big-endian, in place */
+static void sml_ctr_htonl_n(u32 *node, u32 len)
+{
+	u32 *pos = node;
+	u32 *end = node + len;
+
+	while (pos < end) {
+		*pos = cpu_to_be32(*pos);
+		pos++;
+	}
+}
+
+/**
+ * hinic_sml_ctr_read_build_req - fill a counter read request
+ * @msg: request structure to build
+ * @instance_id: instance value
+ * @op_id: read opcode (CHIPIF_SM_CTR_OP_READ or CHIPIF_SM_CTR_OP_READ_CLEAR)
+ * @ack: CHIPIF_ACK when a response is expected
+ * @ctr_id: counter id
+ * @init_val: initial value stored in the request
+ *
+ * Head and counter id are converted to big-endian for the hardware;
+ * @init_val is stored without conversion.
+ */
+static void hinic_sml_ctr_read_build_req(struct chipif_sml_ctr_rd_req *msg,
+					 u8 instance_id, u8 op_id,
+					 u8 ack, u32 ctr_id, u32 init_val)
+{
+	msg->head.value = 0;
+	msg->head.bs.instance = instance_id;
+	msg->head.bs.op_id = op_id;
+	msg->head.bs.ack = ack;
+	/* hardware expects the head in big-endian byte order */
+	msg->head.value = cpu_to_be32(msg->head.value);
+
+	msg->ctr_id = ctr_id;
+	msg->ctr_id = cpu_to_be32(msg->ctr_id);
+
+	msg->initial = init_val;
+}
+
+/**
+ * hinic_sml_ctr_write_build_req - fill a counter write request
+ * @msg: request structure to build
+ * @instance_id: instance value
+ * @op_id: write opcode (CHIPIF_SM_CTR_OP_WRITE)
+ * @ack: CHIPIF_ACK or CHIPIF_NOACK
+ * @ctr_id: counter id
+ * @val1: first 64-bit value, split into value1_h/value1_l
+ * @val2: second 64-bit value (pair writes), split into value2_h/value2_l
+ *
+ * Head and counter id are converted to big-endian; the split value halves
+ * are stored as-is.
+ */
+static void hinic_sml_ctr_write_build_req(struct chipif_sml_ctr_wr_req *msg,
+					  u8 instance_id, u8 op_id,
+					  u8 ack, u32 ctr_id,
+					  u64 val1, u64 val2)
+{
+	msg->head.value = 0;
+	msg->head.bs.instance = instance_id;
+	msg->head.bs.op_id = op_id;
+	msg->head.bs.ack = ack;
+	/* hardware expects the head in big-endian byte order */
+	msg->head.value = cpu_to_be32(msg->head.value);
+
+	msg->ctr_id = ctr_id;
+	msg->ctr_id = cpu_to_be32(msg->ctr_id);
+
+	msg->value1_h = val1 >> 32;
+	msg->value1_l = val1 & 0xFFFFFFFF;
+
+	msg->value2_h = val2 >> 32;
+	msg->value2_l = val2 & 0xFFFFFFFF;
+}
+
+/**
+ * hinic_sm_ctr_rd32 - small single 32 counter read
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value: read counter value ptr
+ * Return: 0 - success, negative - failure
+ */
+int hinic_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value)
+{
+	struct chipif_sml_ctr_rd_req req;
+	union ctr_rd_rsp rsp;
+	int ret;
+
+	if (!hwdev || !value)
+		return -EFAULT;
+
+	hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
+				     CHIPIF_ACK, ctr_id, 0);
+
+	ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+				     (unsigned short)sizeof(req),
+				     (void *)&rsp, (unsigned short)sizeof(rsp));
+	if (ret) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Sm 32bit counter read fail, err(%d)\n", ret);
+		return ret;
+	}
+	/* response is 4 big-endian dwords (16 bytes); convert in place */
+	sml_ctr_htonl_n((u32 *)&rsp, 4);
+	*value = rsp.bs_ss32_rsp.value1;
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_sm_ctr_rd32);
+
+/**
+ * hinic_sm_ctr_rd32_clear - small single 32 counter read and clear to zero
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value: read counter value ptr
+ * Return: 0 - success, negative - failure
+ * according to ACN error code (ERR_OK, ERR_PARAM, ERR_FAILED...etc)
+ */
+int hinic_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance,
+			    u32 ctr_id, u32 *value)
+{
+	struct chipif_sml_ctr_rd_req req;
+	union ctr_rd_rsp rsp;
+	int ret;
+
+	if (!hwdev || !value)
+		return -EFAULT;
+
+	/* READ_CLEAR opcode makes hardware zero the counter after reading */
+	hinic_sml_ctr_read_build_req(&req, instance,
+				     CHIPIF_SM_CTR_OP_READ_CLEAR,
+				     CHIPIF_ACK, ctr_id, 0);
+
+	ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+				     (unsigned short)sizeof(req),
+				     (void *)&rsp, (unsigned short)sizeof(rsp));
+
+	if (ret) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Sm 32bit counter clear fail, err(%d)\n", ret);
+		return ret;
+	}
+	/* response is 4 big-endian dwords (16 bytes); convert in place */
+	sml_ctr_htonl_n((u32 *)&rsp, 4);
+	*value = rsp.bs_ss32_rsp.value1;
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_sm_ctr_rd32_clear);
+
+/**
+ * hinic_sm_ctr_wr32 - small single 32 counter write
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value: write counter value
+ * Return: 0 - success, negative - failure
+ */
+int hinic_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value)
+{
+	struct chipif_sml_ctr_wr_req wr_req;
+	struct chipif_sml_ctr_wr_rsp wr_rsp;
+
+	if (!hwdev)
+		return -EFAULT;
+
+	/* no ack requested; the 32-bit value goes in the low half */
+	hinic_sml_ctr_write_build_req(&wr_req, instance, CHIPIF_SM_CTR_OP_WRITE,
+				      CHIPIF_NOACK, ctr_id, (u64)value, 0ULL);
+
+	return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&wr_req,
+				      (unsigned short)sizeof(wr_req),
+				      (void *)&wr_rsp,
+				      (unsigned short)sizeof(wr_rsp));
+}
+
+/**
+ * hinic_sm_ctr_rd64 - big counter 64 read
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value: read counter value ptr
+ * Return: 0 - success, negative - failure
+ */
+int hinic_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value)
+{
+	struct chipif_sml_ctr_rd_req req;
+	union ctr_rd_rsp rsp;
+	int ret;
+
+	if (!hwdev || !value)
+		return -EFAULT;
+
+	hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
+				     CHIPIF_ACK, ctr_id, 0);
+
+	ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+				     (unsigned short)sizeof(req), (void *)&rsp,
+				     (unsigned short)sizeof(rsp));
+	if (ret) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Sm 64bit counter read fail err(%d)\n", ret);
+		return ret;
+	}
+	/* response is 4 big-endian dwords (16 bytes); convert in place */
+	sml_ctr_htonl_n((u32 *)&rsp, 4);
+	/* combine the two 32-bit halves into the 64-bit counter value */
+	*value = ((u64)rsp.bs_bs64_rsp.value1 << 32) | rsp.bs_bs64_rsp.value2;
+
+	return 0;
+}
+
+/**
+ * hinic_sm_ctr_wr64 - big single 64 counter write
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value: write counter value
+ * Return: 0 - success, negative - failure
+ */
+int hinic_sm_ctr_wr64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 value)
+{
+	struct chipif_sml_ctr_wr_req wr_req;
+	struct chipif_sml_ctr_wr_rsp wr_rsp;
+
+	if (!hwdev)
+		return -EFAULT;
+
+	/* single write: only the first 64-bit value is used */
+	hinic_sml_ctr_write_build_req(&wr_req, instance, CHIPIF_SM_CTR_OP_WRITE,
+				      CHIPIF_NOACK, ctr_id, value, 0ULL);
+
+	return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&wr_req,
+				      (unsigned short)sizeof(wr_req),
+				      (void *)&wr_rsp,
+				      (unsigned short)sizeof(wr_rsp));
+}
+
+/**
+ * hinic_sm_ctr_rd64_pair - big pair 128 counter read
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value1: read counter value ptr
+ * @value2: read counter value ptr
+ * Return: 0 - success, negative - failure
+ */
+int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance,
+			   u32 ctr_id, u64 *value1, u64 *value2)
+{
+	struct chipif_sml_ctr_rd_req req;
+	union ctr_rd_rsp rsp;
+	int ret;
+
+	/* pr_err (not sdk_err) below because hwdev may itself be NULL here */
+	if (!value1) {
+		pr_err("value1 is NULL for read 64 bit pair\n");
+		return -EFAULT;
+	}
+
+	if (!value2) {
+		pr_err("value2 is NULL for read 64 bit pair\n");
+		return -EFAULT;
+	}
+
+	/* pair pattern ctr_id must be an even number */
+	if (!hwdev || (0 != (ctr_id & 0x1))) {
+		pr_err("Hwdev is NULL or ctr_id(%d) is odd number for read 64 bit pair\n",
+		       ctr_id);
+		return -EFAULT;
+	}
+
+	hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ,
+				     CHIPIF_ACK, ctr_id, 0);
+
+	ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req,
+				     (unsigned short)sizeof(req), (void *)&rsp,
+				     (unsigned short)sizeof(rsp));
+	if (ret) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Sm 64 bit rd pair ret(%d)\n", ret);
+		return ret;
+	}
+	/* response is 4 big-endian dwords (16 bytes); convert in place */
+	sml_ctr_htonl_n((u32 *)&rsp, 4);
+	/* both 64-bit values arrive split into high/low 32-bit halves */
+	*value1 = ((u64)rsp.bs_bp64_rsp.val1_h << 32) | rsp.bs_bp64_rsp.val1_l;
+	*value2 = ((u64)rsp.bs_bp64_rsp.val2_h << 32) | rsp.bs_bp64_rsp.val2_l;
+
+	return 0;
+}
+
+/**
+ * hinic_sm_ctr_wr64_pair - big pair 128 counter write
+ * @hwdev: the pointer to hw device
+ * @node: the node id
+ * @instance: instance value
+ * @ctr_id: counter id
+ * @value1: write counter value
+ * @value2: write counter value
+ * Return: 0 - success, negative - failure
+ */
+int hinic_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance,
+			   u32 ctr_id, u64 value1, u64 value2)
+{
+	struct chipif_sml_ctr_wr_req wr_req;
+	struct chipif_sml_ctr_wr_rsp wr_rsp;
+
+	/* pair pattern ctr_id must be even number */
+	if (!hwdev || (ctr_id & 0x1)) {
+		pr_err("Handle is NULL or ctr_id(%d) is odd number for write 64 bit pair\n",
+		       ctr_id);
+		return -EFAULT;
+	}
+
+	hinic_sml_ctr_write_build_req(&wr_req, instance, CHIPIF_SM_CTR_OP_WRITE,
+				      CHIPIF_NOACK, ctr_id, value1, value2);
+	return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&wr_req,
+				      (unsigned short)sizeof(wr_req),
+				      (void *)&wr_rsp,
+				      (unsigned short)sizeof(wr_rsp));
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h
new file mode 100644
index 000000000000..55df554efe28
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __CHIPIF_SML_COUNTER_H__
+#define __CHIPIF_SML_COUNTER_H__
+
+#define CHIPIF_FUNC_PF 0
+#define CHIPIF_FUNC_VF 1
+#define CHIPIF_FUNC_PPF 2
+
+#define CHIPIF_ACK 1
+#define CHIPIF_NOACK 0
+
+#define CHIPIF_SM_CTR_OP_READ 0x2
+#define CHIPIF_SM_CTR_OP_READ_CLEAR 0x6
+#define CHIPIF_SM_CTR_OP_WRITE 0x3
+
+#define SMALL_CNT_READ_RSP_SIZE 16
+
+/* common head of every SM counter request (sent big-endian) */
+union chipif_sml_ctr_req_head {
+	struct {
+		u32 pad : 15;		/* reserved */
+		u32 ack : 1;		/* CHIPIF_ACK / CHIPIF_NOACK */
+		u32 op_id : 5;		/* CHIPIF_SM_CTR_OP_* opcode */
+		u32 instance : 6;	/* instance id */
+		u32 src : 5;		/* source node id */
+	} bs;
+
+	u32 value;
+};
+
+/* counter read request struct */
+struct chipif_sml_ctr_rd_req {
+	u32 extra;
+	union chipif_sml_ctr_req_head head;
+	u32 ctr_id;	/* counter id, sent big-endian */
+	u32 initial;	/* initial value, stored as-is */
+	u32 pad;
+};
+
+/* counter read response union - one view per counter layout; the
+ * 16-byte payload is converted from big-endian before these views
+ * are read (see sml_ctr_htonl_n callers)
+ */
+union ctr_rd_rsp {
+	/* small single 16-bit counter (presumably; not used in this file) */
+	struct {
+		u32 value1 : 16;
+		u32 pad0 : 16;
+		u32 pad1[3];
+	} bs_ss16_rsp;
+
+	/* small single 32-bit counter (used by hinic_sm_ctr_rd32*) */
+	struct {
+		u32 value1;
+		u32 pad[3];
+	} bs_ss32_rsp;
+
+	/* small pair 20/12-bit counters (presumably; not used in this file) */
+	struct {
+		u32 value1 : 20;
+		u32 pad0 : 12;
+		u32 value2 : 12;
+		u32 pad1 : 20;
+		u32 pad2[2];
+	} bs_sp_rsp;
+
+	/* big single 64-bit counter, high word first (hinic_sm_ctr_rd64) */
+	struct {
+		u32 value1;
+		u32 value2;
+		u32 pad[2];
+	} bs_bs64_rsp;
+
+	/* big pair of 64-bit counters (hinic_sm_ctr_rd64_pair) */
+	struct {
+		u32 val1_h;
+		u32 val1_l;
+		u32 val2_h;
+		u32 val2_l;
+	} bs_bp64_rsp;
+
+};
+
+/* response head */
+union sml_ctr_rsp_head {
+	struct {
+		u32 pad : 30;	/* reserve */
+		u32 code : 2;	/* error code */
+	} bs;
+
+	u32 value;
+};
+
+/* counter write request struct; 64-bit values are split into
+ * high/low 32-bit halves by hinic_sml_ctr_write_build_req()
+ */
+struct chipif_sml_ctr_wr_req {
+	u32 extra;
+	union chipif_sml_ctr_req_head head;
+	u32 ctr_id;	/* counter id, sent big-endian */
+	u32 rsv1;
+	u32 rsv2;
+	u32 value1_h;
+	u32 value1_l;
+	u32 value2_h;
+	u32 value2_l;
+};
+
+/* counter write response struct */
+struct chipif_sml_ctr_wr_rsp {
+	union sml_ctr_rsp_head head;
+	u32 pad[3];
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c
new file mode 100644
index 000000000000..3f1160d059d1
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "hinic_sm_lt.h"
+#include "hinic_hw.h"
+#include "hinic_hwdev.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwif.h"
+#include "hinic_dbg.h"
+
+#define ACK 1
+#define NOACK 0
+
+#define LT_LOAD16_API_SIZE (16 + 4)
+#define LT_STORE16_API_SIZE (32 + 4)
+
+#define HINIC_API_RD_8B 8
+#define HINIC_API_RD_4B 4
+
+/* Fill the common head of a linear-table request and convert it to
+ * big-endian for the hardware. @num is the entry count field; abuf_flg
+ * is cleared and bc (batch/burst control bit - TODO confirm meaning)
+ * is always set to 1.
+ */
+static inline void sm_lt_build_head(union sml_lt_req_head *head,
+				    u8 instance_id,
+				    u8 op_id, u8 ack,
+				    u8 offset, u8 num)
+{
+	head->value = 0;
+	head->bs.instance = instance_id;
+	head->bs.op_id = op_id;
+	head->bs.ack = ack;
+	head->bs.num = num;
+	head->bs.abuf_flg = 0;
+	head->bs.bc = 1;
+	head->bs.offset = offset;
+	head->value = cpu_to_be32(head->value);
+}
+
+/* Build a linear-table load (read) request: common head plus the
+ * big-endian table index; extra is cleared.
+ */
+static inline void sm_lt_load_build_req(struct sml_lt_load_req *req,
+					u8 instance_id,
+					u8 op_id, u8 ack,
+					u32 lt_index,
+					u8 offset, u8 num)
+{
+	sm_lt_build_head(&req->head, instance_id, op_id, ack, offset, num);
+	req->extra = 0;
+	req->index = lt_index;
+	req->index = cpu_to_be32(req->index);
+}
+
+/* Build a linear-table store (write) request: common head, big-endian
+ * table index, byte-enable masks, and the payload copied from @data.
+ * byte_enb3 goes alone into byte_enb[0]; byte_enb2/byte_enb1 are packed
+ * into byte_enb[1] (high/low 16 bits) before the big-endian conversion.
+ */
+static inline void sm_lt_store_build_req(struct sml_lt_store_req *req,
+					 u8 instance_id,
+					 u8 op_id, u8 ack,
+					 u32 lt_index,
+					 u8 offset,
+					 u8 num,
+					 u16 byte_enb3,
+					 u16 byte_enb2,
+					 u16 byte_enb1,
+					 u8 *data)
+{
+	sm_lt_build_head(&req->head, instance_id, op_id, ack, offset, num);
+	req->index = lt_index;
+	req->index = cpu_to_be32(req->index);
+	req->extra = 0;
+	req->byte_enb[0] = (u32)(byte_enb3);
+	req->byte_enb[0] = cpu_to_be32(req->byte_enb[0]);
+	req->byte_enb[1] = cpu_to_be32((((u32)byte_enb2) << 16) | byte_enb1);
+	sml_lt_store_memcpy((u32 *)req->write_data, (u32 *)(void *)data, num);
+}
+
+/**
+ * hinic_dbg_lt_rd_16byte - read 16 bytes from a linear table entry
+ * @hwdev: the pointer to hw device
+ * @dest: destination node id
+ * @instance: instance value
+ * @lt_index: linear table index
+ * @data: output buffer, must hold at least 16 bytes
+ * Return: 0 - success, negative - failure
+ */
+int hinic_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance,
+			   u32 lt_index, u8 *data)
+{
+	struct sml_lt_load_req req;
+	int ret;
+
+	/* @data is written by the API call below; validate it like the
+	 * write path (hinic_dbg_lt_wr_16byte_mask) already does
+	 */
+	if (!hwdev || !data)
+		return -EFAULT;
+
+	sm_lt_load_build_req(&req, instance, SM_LT_LOAD, ACK, lt_index, 0, 0);
+
+	ret = hinic_api_cmd_read_ack(hwdev, dest, &req,
+				     LT_LOAD16_API_SIZE, (void *)data, 16);
+	if (ret) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Read linear table 16byte fail, err: %d\n", ret);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_dbg_lt_rd_16byte);
+
+/**
+ * hinic_dbg_lt_wr_16byte_mask - write 16 bytes to a linear table entry
+ * @hwdev: the pointer to hw device
+ * @dest: destination node id
+ * @instance: instance value
+ * @lt_index: linear table index
+ * @data: 16 bytes of data to write
+ * @mask: per-byte enable mask selecting which bytes are stored
+ * Return: 0 - success, negative - failure
+ */
+int hinic_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance,
+				u32 lt_index, u8 *data, u16 mask)
+{
+	struct sml_lt_store_req req;
+	int ret;
+
+	if (!hwdev || !data)
+		return -EFAULT;
+
+	/* no-ack store; only byte_enb1 (the low mask) carries @mask */
+	sm_lt_store_build_req(&req, instance, SM_LT_STORE, NOACK, lt_index,
+			      0, 0, 0, 0, mask, data);
+
+	ret = hinic_api_cmd_write_nack(hwdev, dest, &req, LT_STORE16_API_SIZE);
+	if (ret) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Write linear table 16byte fail, err: %d\n", ret);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_dbg_lt_wr_16byte_mask);
+
+/**
+ * hinic_api_csr_rd32 - read a 32 bit csr
+ * @hwdev: the pointer to hw device
+ * @dest: destination node id
+ * @addr: csr address
+ * @val: output pointer for the value read
+ * Return: 0 - success, negative - failure
+ */
+int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_csr_request_api_data api_data = { 0 };
+	u32 csr_val = 0;
+	u16 in_size = sizeof(api_data);
+	int ret;
+
+	if (!hwdev || !val)
+		return -EFAULT;
+
+	/* the CPI node is directly memory-mapped: read it via mmio */
+	if (dest == HINIC_NODE_ID_CPI) {
+		*val = readl(dev->hwif->cfg_regs_base + addr);
+		return 0;
+	}
+
+	/* otherwise go through the API chain; dw1/dw2 are sent big-endian */
+	api_data.dw0 = 0;
+	api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_READ_CSR;
+	api_data.dw1.bits.need_response = HINIC_CSR_NEED_RESP_DATA;
+	api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_32;
+	api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
+	api_data.dw2.bits.csr_addr = addr;
+	api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
+
+	ret = hinic_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data),
+				     in_size, &csr_val, HINIC_API_RD_4B);
+	if (ret) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Read 32 bit csr failed, dest %d addr 0x%x, ret: 0x%x\n",
+			dest, addr, ret);
+		return ret;
+	}
+
+	*val = csr_val;
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_api_csr_rd32);
+
+/**
+ * hinic_api_csr_wr32 - write a 32 bit csr
+ * @hwdev: the pointer to hw device
+ * @dest: destination node id
+ * @addr: csr address
+ * @val: value to write
+ * Return: 0 - success, negative - failure
+ */
+int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_csr_request_api_data api_data = { 0 };
+	u16 in_size = sizeof(api_data);
+	int ret;
+
+	if (!hwdev)
+		return -EFAULT;
+
+	/* the CPI node is directly memory-mapped: write it via mmio */
+	if (dest == HINIC_NODE_ID_CPI) {
+		writel(val, dev->hwif->cfg_regs_base + addr);
+		return 0;
+	}
+
+	/* otherwise go through the API chain; dw1/dw2 are sent big-endian */
+	api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_WRITE_CSR;
+	api_data.dw1.bits.need_response = HINIC_CSR_NO_RESP_DATA;
+	api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_32;
+	api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
+	api_data.dw2.bits.csr_addr = addr;
+	api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
+	/* 32-bit write: high word carries the all-ones filler per the
+	 * hinic_csr_request_api_data contract
+	 */
+	api_data.csr_write_data_h = 0xffffffff;
+	api_data.csr_write_data_l = val;
+
+	ret = hinic_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), in_size);
+	if (ret) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Write 32 bit csr failed, dest %d addr 0x%x val 0x%x\n",
+			dest, addr, val);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_api_csr_wr32);
+
+/**
+ * hinic_api_csr_rd64 - read a 64 bit csr through the api chain
+ * @hwdev: the pointer to hw device
+ * @dest: destination node id (HINIC_NODE_ID_CPI not supported)
+ * @addr: csr address
+ * @val: output pointer for the value read
+ * Return: 0 - success, negative - failure
+ */
+int hinic_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val)
+{
+	struct hinic_csr_request_api_data api_data = { 0 };
+	u64 csr_val = 0;
+	u16 in_size = sizeof(api_data);
+	int ret;
+
+	if (!hwdev || !val)
+		return -EFAULT;
+
+	/* the mmio fast path is 32 bits wide only; reject 64-bit CPI reads */
+	if (dest == HINIC_NODE_ID_CPI) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Unsupport to read 64 bit csr from cpi\n");
+		return -EOPNOTSUPP;
+	}
+
+	api_data.dw0 = 0;
+	api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_READ_CSR;
+	api_data.dw1.bits.need_response = HINIC_CSR_NEED_RESP_DATA;
+	api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_64;
+	api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
+	api_data.dw2.bits.csr_addr = addr;
+	api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
+
+	ret = hinic_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data),
+				     in_size, &csr_val, HINIC_API_RD_8B);
+	if (ret) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Read 64 bit csr failed, dest %d addr 0x%x\n",
+			dest, addr);
+		return ret;
+	}
+
+	*val = csr_val;
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_api_csr_rd64);
+
+/**
+ * hinic_api_csr_wr64 - write a 64 bit csr through the api chain
+ * @hwdev: the pointer to hw device
+ * @dest: destination node id (HINIC_NODE_ID_CPI not supported)
+ * @addr: csr address
+ * @val: value to write (zero is valid)
+ * Return: 0 - success, negative - failure
+ */
+int hinic_api_csr_wr64(void *hwdev, u8 dest, u32 addr, u64 val)
+{
+	struct hinic_csr_request_api_data api_data = { 0 };
+	u16 in_size = sizeof(api_data);
+	int ret;
+
+	/* @val is a plain value, not a pointer: the old "|| !val" check
+	 * wrongly rejected writing 0; only validate @hwdev (matches
+	 * hinic_sm_ctr_wr64 / hinic_api_csr_wr32)
+	 */
+	if (!hwdev)
+		return -EFAULT;
+
+	/* the mmio fast path is 32 bits wide only; reject 64-bit CPI writes */
+	if (dest == HINIC_NODE_ID_CPI) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Unsupport to write 64 bit csr from cpi\n");
+		return -EOPNOTSUPP;
+	}
+
+	api_data.dw0 = 0;
+	api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_WRITE_CSR;
+	api_data.dw1.bits.need_response = HINIC_CSR_NO_RESP_DATA;
+	api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_64;
+	api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32);
+	api_data.dw2.bits.csr_addr = addr;
+	api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32);
+	api_data.csr_write_data_h = cpu_to_be32(upper_32_bits(val));
+	api_data.csr_write_data_l = cpu_to_be32(lower_32_bits(val));
+
+	ret = hinic_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), in_size);
+	if (ret) {
+		sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
+			"Write 64 bit csr failed, dest %d addr 0x%x val 0x%llx\n",
+			dest, addr, val);
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index ee357088d021..ef82153b580a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -1,904 +1,451 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
#include <linux/pci.h>
-#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
-#include <linux/module.h>
-#include "hinic_hw_dev.h"
-#include "hinic_dev.h"
-#include "hinic_hw_mbox.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_port.h"
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_nic_cfg.h"
+#include "hinic_nic_dev.h"
#include "hinic_sriov.h"
+#include "hinic_lld.h"
-static unsigned char set_vf_link_state;
-module_param(set_vf_link_state, byte, 0444);
-MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto, 1 represents link always up, 2 represents link always down. - default is 0.");
-
-#define HINIC_VLAN_PRIORITY_SHIFT 13
-#define HINIC_ADD_VLAN_IN_MAC 0x8000
-#define HINIC_TX_RATE_TABLE_FULL 12
-#define HINIC_MAX_QOS 7
-
-static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
- u16 vlan_id, u16 func_id)
+int hinic_pci_sriov_disable(struct pci_dev *dev)
{
- struct hinic_port_mac_cmd mac_info = {0};
- u16 out_size = sizeof(mac_info);
- int err;
+#ifdef CONFIG_PCI_IOV
+ struct hinic_sriov_info *sriov_info;
+ u16 tmp_vfs;
- mac_info.func_idx = func_id;
- mac_info.vlan_id = vlan_id;
- memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+ sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+ /* if SR-IOV is already disabled then nothing will be done */
+ if (!sriov_info->sriov_enabled)
+ return 0;
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,
- sizeof(mac_info), &mac_info, &out_size);
- if (err || out_size != sizeof(mac_info) ||
- (mac_info.status && mac_info.status != HINIC_MGMT_STATUS_EXIST)) {
- dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x\n",
- err, mac_info.status, out_size);
- return -EIO;
+ if (test_and_set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state)) {
+ nic_err(&sriov_info->pdev->dev,
+ "SR-IOV disable in process, please wait\n");
+ return -EPERM;
}
- return 0;
-}
-
-static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id,
- u8 link_status)
-{
- struct vf_data_storage *vf_infos = hwdev->func_to_io.vf_infos;
- struct hinic_port_link_status link = {0};
- u16 out_size = sizeof(link);
- int err;
-
- if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) {
- link.link = link_status;
- link.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
- err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
- vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT,
- &link, sizeof(link),
- &link, &out_size, 0);
- if (err || !out_size || link.status)
- dev_err(&hwdev->hwif->pdev->dev,
- "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n",
- HW_VF_ID_TO_OS(vf_id), err,
- link.status, out_size);
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(sriov_info->pdev)) {
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+ nic_warn(&sriov_info->pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return -EPERM;
}
-}
+ sriov_info->sriov_enabled = false;
-/* send link change event mbox msg to active vfs under the pf */
-void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
- u8 link_status)
-{
- struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
- u16 i;
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(sriov_info->pdev);
- nic_io->link_status = link_status;
- for (i = 1; i <= nic_io->max_vfs; i++) {
- if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced)
- hinic_notify_vf_link_status(hwdev, i, link_status);
- }
-}
+ tmp_vfs = (u16)sriov_info->num_vfs;
+ sriov_info->num_vfs = 0;
+ hinic_deinit_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW(tmp_vfs - 1));
-static u16 hinic_vf_info_vlanprio(struct hinic_hwdev *hwdev, int vf_id)
-{
- struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
- u16 pf_vlan, vlanprio;
- u8 pf_qos;
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
- pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan;
- pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos;
- vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT;
+#endif
- return vlanprio;
+ return 0;
}
-static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid,
- u8 qos, int vf_id)
+int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
- struct hinic_vf_vlan_config vf_vlan = {0};
- u16 out_size = sizeof(vf_vlan);
- int err;
- u8 cmd;
+#ifdef CONFIG_PCI_IOV
+ struct hinic_sriov_info *sriov_info;
+ int err = 0;
+ int pre_existing_vfs = 0;
- /* VLAN 0 is a special case, don't allow it to be removed */
- if (!vid && !add)
- return 0;
+ sriov_info = hinic_get_sriov_info_by_pcidev(dev);
- vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
- vf_vlan.vlan_id = vid;
- vf_vlan.qos = qos;
+ if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) {
+ nic_err(&sriov_info->pdev->dev,
+ "SR-IOV enable in process, please wait, num_vfs %d\n",
+ num_vfs);
+ return -EPERM;
+ }
- if (add)
- cmd = HINIC_PORT_CMD_SET_VF_VLAN;
- else
- cmd = HINIC_PORT_CMD_CLR_VF_VLAN;
+ pre_existing_vfs = pci_num_vf(sriov_info->pdev);
- err = hinic_port_msg_cmd(hwdev, cmd, &vf_vlan,
- sizeof(vf_vlan), &vf_vlan, &out_size);
- if (err || !out_size || vf_vlan.status) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n",
- HW_VF_ID_TO_OS(vf_id), err, vf_vlan.status, out_size);
- return -EFAULT;
+ if (num_vfs > pci_sriov_get_totalvfs(sriov_info->pdev)) {
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return -ERANGE;
+ }
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs) {
+ err = hinic_pci_sriov_disable(sriov_info->pdev);
+ if (err) {
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+ } else if (pre_existing_vfs == num_vfs) {
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return num_vfs;
+ }
+
+ err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW((u16)num_vfs - 1));
+ if (err) {
+ nic_err(&sriov_info->pdev->dev,
+ "Failed to init vf in hardware before enable sriov, error %d\n",
+ err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
}
+ err = pci_enable_sriov(sriov_info->pdev, num_vfs);
+ if (err) {
+ nic_err(&sriov_info->pdev->dev,
+ "Failed to enable SR-IOV, error %d\n", err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ sriov_info->sriov_enabled = true;
+ sriov_info->num_vfs = num_vfs;
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+
+ return num_vfs;
+#else
+
return 0;
+#endif
}
-static int hinic_set_vf_tx_rate_max_min(struct hinic_hwdev *hwdev, u16 vf_id,
- u32 max_rate, u32 min_rate)
+static bool hinic_is_support_sriov_configure(struct pci_dev *pdev)
{
- struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
- struct hinic_tx_rate_cfg_max_min rate_cfg = {0};
- u16 out_size = sizeof(rate_cfg);
- int err;
+ enum hinic_init_state state = hinic_get_init_state(pdev);
+ struct hinic_sriov_info *sriov_info;
- rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
- rate_cfg.max_rate = max_rate;
- rate_cfg.min_rate = min_rate;
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE,
- &rate_cfg, sizeof(rate_cfg), &rate_cfg,
- &out_size);
- if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED &&
- rate_cfg.status) || err || !out_size) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
- HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err,
- rate_cfg.status, out_size);
- return -EIO;
+ if (state < HINIC_INIT_STATE_NIC_INITED) {
+ nic_err(&pdev->dev, "NIC device not initialized, don't support to configure sriov\n");
+ return false;
}
- if (!rate_cfg.status) {
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate;
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate;
+ sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+ if (FUNC_SRIOV_FIX_NUM_VF(sriov_info->hwdev)) {
+ nic_err(&pdev->dev, "Don't support to changed sriov configuration\n");
+ return false;
}
- return rate_cfg.status;
+ return true;
}
-static int hinic_set_vf_rate_limit(struct hinic_hwdev *hwdev, u16 vf_id,
- u32 tx_rate)
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
- struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
- struct hinic_tx_rate_cfg rate_cfg = {0};
- u16 out_size = sizeof(rate_cfg);
- int err;
+ struct hinic_sriov_info *sriov_info;
- rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
- rate_cfg.tx_rate = tx_rate;
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_RATE,
- &rate_cfg, sizeof(rate_cfg), &rate_cfg,
- &out_size);
- if (err || !out_size || rate_cfg.status) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
- HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status,
- out_size);
- if (rate_cfg.status)
- return rate_cfg.status;
+ if (!hinic_is_support_sriov_configure(dev))
+ return -EFAULT;
- return -EIO;
- }
+ sriov_info = hinic_get_sriov_info_by_pcidev(dev);
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate;
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0;
+ if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state))
+ return -EFAULT;
- return 0;
+ if (!num_vfs)
+ return hinic_pci_sriov_disable(dev);
+ else
+ return hinic_pci_sriov_enable(dev, num_vfs);
}
-static int hinic_set_vf_tx_rate(struct hinic_hwdev *hwdev, u16 vf_id,
- u32 max_rate, u32 min_rate)
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
int err;
- err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate);
- if (err != HINIC_MGMT_CMD_UNSUPPORTED)
- return err;
-
- if (min_rate) {
- dev_err(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate\n");
+ if (!FUNC_SUPPORT_SET_VF_MAC_VLAN(adapter->hwdev)) {
+ nicif_err(adapter, drv, netdev,
+ "Current function don't support to set vf mac\n");
return -EOPNOTSUPP;
}
- dev_info(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate, force min_tx_rate = max_tx_rate\n");
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ if (is_multicast_ether_addr(mac) || /*lint !e574*/
+ vf >= sriov_info->num_vfs) /*lint !e574*/
+ return -EINVAL;
+
+ err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac);
+ if (err) {
+ nicif_info(adapter, drv, netdev, "Failed to set MAC %pM on VF %d\n",
+ mac, vf);
+ return err;
+ }
+
+ if (is_zero_ether_addr(mac))
+ nicif_info(adapter, drv, netdev, "Removing MAC on VF %d\n", vf);
+ else
+ nicif_info(adapter, drv, netdev, "Setting MAC %pM on VF %d\n",
+ mac, vf);
+ nicif_info(adapter, drv, netdev, "Reload the VF driver to make this change effective\n");
- return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate);
+ return 0;
}
-static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id)
+/*lint -save -e574 -e734*/
+static int set_hw_vf_vlan(struct hinic_sriov_info *sriov_info,
+ u16 cur_vlanprio, int vf, u16 vlan, u8 qos)
{
- struct vf_data_storage *vf_info;
- u16 func_id, vlan_id;
int err = 0;
+ u16 old_vlan = cur_vlanprio & VLAN_VID_MASK;
- vf_info = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
- if (vf_info->pf_set_mac) {
- func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
-
- vlan_id = 0;
-
- err = hinic_set_mac(hwdev, vf_info->vf_mac_addr, vlan_id,
- func_id);
- if (err) {
- dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set VF %d MAC\n",
- HW_VF_ID_TO_OS(vf_id));
- return err;
+ if (vlan || qos) {
+ if (cur_vlanprio) {
+ err = hinic_kill_vf_vlan(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ if (err) {
+ nic_err(&sriov_info->pdev->dev, "Failed to delete vf %d old vlan %d\n",
+ vf, old_vlan);
+ return err;
+ }
}
- }
-
- if (hinic_vf_info_vlanprio(hwdev, vf_id)) {
- err = hinic_set_vf_vlan(hwdev, true, vf_info->pf_vlan,
- vf_info->pf_qos, vf_id);
+ err = hinic_add_vf_vlan(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf), vlan, qos);
if (err) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to add VF %d VLAN_QOS\n",
- HW_VF_ID_TO_OS(vf_id));
+ nic_err(&sriov_info->pdev->dev, "Failed to add vf %d new vlan %d\n",
+ vf, vlan);
return err;
}
- }
-
- if (vf_info->max_rate) {
- err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate,
- vf_info->min_rate);
+ } else {
+ err = hinic_kill_vf_vlan(sriov_info->hwdev, OS_VF_ID_TO_HW(vf));
if (err) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d max rate: %d, min rate: %d\n",
- HW_VF_ID_TO_OS(vf_id), vf_info->max_rate,
- vf_info->min_rate);
+ nic_err(&sriov_info->pdev->dev, "Failed to delete vf %d vlan %d\n",
+ vf, old_vlan);
return err;
}
}
- return 0;
+ return hinic_update_mac_vlan(sriov_info->hwdev, old_vlan, vlan,
+ OS_VF_ID_TO_HW(vf));
}
-static int hinic_register_vf_msg_handler(void *hwdev, u16 vf_id,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
- struct hinic_register_vf *register_info = buf_out;
- struct hinic_hwdev *hw_dev = hwdev;
- struct hinic_func_to_io *nic_io;
- int err;
-
- nic_io = &hw_dev->func_to_io;
- if (vf_id > nic_io->max_vfs) {
- dev_err(&hw_dev->hwif->pdev->dev, "Register VF id %d exceed limit[0-%d]\n",
- HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs));
- register_info->status = EFAULT;
- return -EFAULT;
- }
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ u16 vlanprio, cur_vlanprio;
- *out_size = sizeof(*register_info);
- err = hinic_init_vf_config(hw_dev, vf_id);
- if (err) {
- register_info->status = EFAULT;
- return err;
+ if (!FUNC_SUPPORT_SET_VF_MAC_VLAN(adapter->hwdev)) {
+ nicif_err(adapter, drv, netdev,
+ "Current function don't support to set vf vlan\n");
+ return -EOPNOTSUPP;
}
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true;
-
- return 0;
-}
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
-static int hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
-{
- struct hinic_hwdev *hw_dev = hwdev;
- struct hinic_func_to_io *nic_io;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
- nic_io = &hw_dev->func_to_io;
- *out_size = 0;
- if (vf_id > nic_io->max_vfs)
+ vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT;
+ cur_vlanprio = hinic_vf_info_vlanprio(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ /* duplicate request, so just return success */
+ if (vlanprio == cur_vlanprio)
return 0;
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = false;
-
- return 0;
-}
-
-static int hinic_change_vf_mtu_msg_handler(void *hwdev, u16 vf_id,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
-{
- struct hinic_hwdev *hw_dev = hwdev;
- int err;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, buf_in,
- in_size, buf_out, out_size);
- if (err) {
- dev_err(&hw_dev->hwif->pdev->dev, "Failed to set VF %u mtu\n",
- vf_id);
- return err;
- }
-
- return 0;
+ return set_hw_vf_vlan(sriov_info, cur_vlanprio, vf, vlan, qos);
}
-static int hinic_get_vf_mac_msg_handler(void *hwdev, u16 vf_id,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
- struct hinic_port_mac_cmd *mac_info = buf_out;
- struct hinic_hwdev *dev = hwdev;
- struct hinic_func_to_io *nic_io;
- struct vf_data_storage *vf_info;
-
- nic_io = &dev->func_to_io;
- vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
-
- memcpy(mac_info->mac, vf_info->vf_mac_addr, ETH_ALEN);
- mac_info->status = 0;
- *out_size = sizeof(*mac_info);
-
- return 0;
-}
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ int err = 0;
+ bool cur_spoofchk;
-static int hinic_set_vf_mac_msg_handler(void *hwdev, u16 vf_id,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
-{
- struct hinic_port_mac_cmd *mac_out = buf_out;
- struct hinic_port_mac_cmd *mac_in = buf_in;
- struct hinic_hwdev *hw_dev = hwdev;
- struct hinic_func_to_io *nic_io;
- struct vf_data_storage *vf_info;
- int err;
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
- nic_io = &hw_dev->func_to_io;
- vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
- if (vf_info->pf_set_mac && !(vf_info->trust) &&
- is_valid_ether_addr(mac_in->mac)) {
- dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF %d MAC address\n",
- HW_VF_ID_TO_OS(vf_id));
- mac_out->status = HINIC_PF_SET_VF_ALREADY;
- *out_size = sizeof(*mac_out);
+ cur_spoofchk = hinic_vf_info_spoofchk(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ /* same request, so just return success */
+ if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
return 0;
- }
- err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_SET_MAC, buf_in,
- in_size, buf_out, out_size);
- if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
- dev_err(&hw_dev->hwif->pdev->dev,
- "Failed to set VF %d MAC address, err: %d, status: 0x%x, out size: 0x%x\n",
- HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size);
- return -EFAULT;
+ err = hinic_set_vf_spoofchk(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf), setting);
+
+ if (!err) {
+ nicif_info(adapter, drv, netdev, "Set VF %d spoofchk %s\n",
+ vf, setting ? "on" : "off");
+ } else if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ nicif_err(adapter, drv, netdev,
+ "Current firmware doesn't support to set vf spoofchk, need to upgrade latest firmware version\n");
+ err = -EOPNOTSUPP;
}
return err;
}
-static int hinic_del_vf_mac_msg_handler(void *hwdev, u16 vf_id,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
- struct hinic_port_mac_cmd *mac_out = buf_out;
- struct hinic_port_mac_cmd *mac_in = buf_in;
- struct hinic_hwdev *hw_dev = hwdev;
- struct hinic_func_to_io *nic_io;
- struct vf_data_storage *vf_info;
- int err;
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ int err = 0;
+ bool cur_trust;
- nic_io = &hw_dev->func_to_io;
- vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
- if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac) &&
- !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) {
- dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF mac.\n");
- mac_out->status = HINIC_PF_SET_VF_ALREADY;
- *out_size = sizeof(*mac_out);
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_trust = hinic_vf_info_trust(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ /* same request, so just return success */
+ if ((setting && cur_trust) || (!setting && !cur_trust))
return 0;
- }
- err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_DEL_MAC, buf_in,
- in_size, buf_out, out_size);
- if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
- dev_err(&hw_dev->hwif->pdev->dev, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n",
- HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size);
- return -EFAULT;
- }
+ err = hinic_set_vf_trust(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf), setting);
+ if (!err)
+ nicif_info(adapter, drv, netdev, "Set VF %d trusted %s succeed\n",
+ vf, setting ? "on" : "off");
+ else
+ nicif_err(adapter, drv, netdev, "Failed set VF %d trusted %s\n",
+ vf, setting ? "on" : "off");
return err;
}
-static int hinic_get_vf_link_status_msg_handler(void *hwdev, u16 vf_id,
- void *buf_in, u16 in_size,
- void *buf_out, u16 *out_size)
+int hinic_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
{
- struct hinic_port_link_cmd *get_link = buf_out;
- struct hinic_hwdev *hw_dev = hwdev;
- struct vf_data_storage *vf_infos;
- struct hinic_func_to_io *nic_io;
- bool link_forced, link_up;
-
- nic_io = &hw_dev->func_to_io;
- vf_infos = nic_io->vf_infos;
- link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced;
- link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up;
-
- if (link_forced)
- get_link->state = link_up ?
- HINIC_LINK_STATE_UP : HINIC_LINK_STATE_DOWN;
- else
- get_link->state = nic_io->link_status;
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
- get_link->status = 0;
- *out_size = sizeof(*get_link);
+ hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi);
return 0;
}
-static bool check_func_table(struct hinic_hwdev *hwdev, u16 func_idx,
- void *buf_in, u16 in_size)
+/**
+ * hinic_ndo_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link: required link state
+ * Return: 0 - success, negative - failure
+ * Set the link state of a specified VF, regardless of physical link state
+ */
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
- struct hinic_cmd_fw_ctxt *function_table = buf_in;
-
- if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size) ||
- !function_table->rx_buf_sz)
- return false;
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ static const char * const vf_link[] = {"auto", "enable", "disable"};
+ int err;
- return true;
-}
+ if (FUNC_FORCE_LINK_UP(adapter->hwdev)) {
+ nicif_err(adapter, drv, netdev,
+ "Current function don't support to set vf link state\n");
+ return -EOPNOTSUPP;
+ }
-static struct vf_cmd_msg_handle nic_vf_cmd_msg_handler[] = {
- {HINIC_PORT_CMD_VF_REGISTER, hinic_register_vf_msg_handler},
- {HINIC_PORT_CMD_VF_UNREGISTER, hinic_unregister_vf_msg_handler},
- {HINIC_PORT_CMD_CHANGE_MTU, hinic_change_vf_mtu_msg_handler},
- {HINIC_PORT_CMD_GET_MAC, hinic_get_vf_mac_msg_handler},
- {HINIC_PORT_CMD_SET_MAC, hinic_set_vf_mac_msg_handler},
- {HINIC_PORT_CMD_DEL_MAC, hinic_del_vf_mac_msg_handler},
- {HINIC_PORT_CMD_GET_LINK_STATE, hinic_get_vf_link_status_msg_handler},
-};
-
-static struct vf_cmd_check_handle nic_cmd_support_vf[] = {
- {HINIC_PORT_CMD_VF_REGISTER, NULL},
- {HINIC_PORT_CMD_VF_UNREGISTER, NULL},
- {HINIC_PORT_CMD_CHANGE_MTU, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_ADD_VLAN, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_DEL_VLAN, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_MAC, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_MAC, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_DEL_MAC, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_RX_MODE, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_PAUSE_INFO, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_LINK_STATE, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_LRO, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_RX_CSUM, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_VPORT_STAT, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_CLEAN_VPORT_STAT, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
- hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_RSS_TEMP_MGR, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_RSS_CFG, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_FWCTXT_INIT, check_func_table},
- {HINIC_PORT_CMD_GET_MGMT_VERSION, NULL},
- {HINIC_PORT_CMD_SET_FUNC_STATE, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_GLOBAL_QPN, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_TSO, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_RQ_IQ_MAP, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_LINK_STATUS_REPORT, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_CAP, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_LINK_MODE, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_GET_VF_COS, NULL},
- {HINIC_PORT_CMD_SET_VHD_CFG, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_VLAN_FILTER, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_Q_FILTER, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_TCAM_FILTER, NULL},
- {HINIC_PORT_CMD_UP_TC_ADD_FLOW, NULL},
- {HINIC_PORT_CMD_UP_TC_DEL_FLOW, NULL},
- {HINIC_PORT_CMD_UP_TC_FLUSH_TCAM, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_UP_TC_ENABLE, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_CABLE_PLUG_EVENT, NULL},
- {HINIC_PORT_CMD_LINK_ERR_EVENT, NULL},
- {HINIC_PORT_CMD_SET_PORT_STATE, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_ETS, NULL},
- {HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE, NULL},
- {HINIC_PORT_CMD_RESET_LINK_CFG, hinic_mbox_check_func_id_8B},
- {HINIC_PORT_CMD_SET_LINK_FOLLOW, NULL},
- {HINIC_PORT_CMD_CLEAR_QP_RES, NULL},
-};
-
-#define CHECK_IPSU_15BIT 0X8000
-
-static
-struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct hinic_dev *nic_dev = netdev_priv(netdev);
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
+ /* validate the request */
+ if (vf_id >= sriov_info->num_vfs) {
+ nicif_err(adapter, drv, netdev,
+ "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
- return &nic_dev->sriov_info;
-}
+ err = hinic_set_vf_link_state(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf_id), link);
-static int hinic_check_mac_info(u8 status, u16 vlan_id)
-{
- if ((status && status != HINIC_MGMT_STATUS_EXIST) ||
- (vlan_id & CHECK_IPSU_15BIT &&
- status == HINIC_MGMT_STATUS_EXIST))
- return -EINVAL;
+ if (!err)
+ nicif_info(adapter, drv, netdev, "Set VF %d link state: %s\n",
+ vf_id, vf_link[link]);
- return 0;
+ return err;
}
-#define HINIC_VLAN_ID_MASK 0x7FFF
+#define HINIC_TX_RATE_TABLE_FULL 12
-static int hinic_update_mac(struct hinic_hwdev *hwdev, u8 *old_mac,
- u8 *new_mac, u16 vlan_id, u16 func_id)
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate)
{
- struct hinic_port_mac_update mac_info = {0};
- u16 out_size = sizeof(mac_info);
- int err;
+ struct hinic_nic_dev *adapter = netdev_priv(netdev);
+ struct nic_port_info port_info = {0};
+ struct hinic_sriov_info *sriov_info;
+ u8 link_status = 0;
+ u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000, SPEED_100000};
+ int err = 0;
- if (!hwdev || !old_mac || !new_mac)
- return -EINVAL;
+ if (!FUNC_SUPPORT_RATE_LIMIT(adapter->hwdev)) {
+ nicif_err(adapter, drv, netdev,
+ "Current function don't support to set vf rate limit\n");
+ return -EOPNOTSUPP;
+ }
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev);
- if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
- dev_err(&hwdev->hwif->pdev->dev, "Invalid VLAN number: %d\n",
- (vlan_id & HINIC_VLAN_ID_MASK));
+ /* verify VF is active */
+ if (vf >= sriov_info->num_vfs) {
+ nicif_err(adapter, drv, netdev, "VF number must be less than %d\n",
+ sriov_info->num_vfs);
return -EINVAL;
}
- mac_info.func_id = func_id;
- mac_info.vlan_id = vlan_id;
- memcpy(mac_info.old_mac, old_mac, ETH_ALEN);
- memcpy(mac_info.new_mac, new_mac, ETH_ALEN);
+ if (max_tx_rate < min_tx_rate) {
+ nicif_err(adapter, drv, netdev, "Invalid rate, max rate %d must greater than min rate %d\n",
+ max_tx_rate, min_tx_rate);
+ return -EINVAL;
+ }
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_UPDATE_MAC, &mac_info,
- sizeof(mac_info), &mac_info, &out_size);
+ err = hinic_get_link_state(adapter->hwdev, &link_status);
+ if (err) {
+ nicif_err(adapter, drv, netdev,
+ "Get link status failed when set vf tx rate\n");
+ return -EIO;
+ }
- if (err || !out_size ||
- hinic_check_mac_info(mac_info.status, mac_info.vlan_id)) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n",
- err, mac_info.status, out_size);
+ if (!link_status) {
+ nicif_err(adapter, drv, netdev,
+ "Link status must be up when set vf tx rate\n");
return -EINVAL;
}
- if (mac_info.status == HINIC_MGMT_STATUS_EXIST)
- dev_warn(&hwdev->hwif->pdev->dev, "MAC is repeated. Ignore update operation\n");
-
- return 0;
-}
-
-static void hinic_get_vf_config(struct hinic_hwdev *hwdev, u16 vf_id,
- struct ifla_vf_info *ivi)
-{
- struct vf_data_storage *vfinfo;
-
- vfinfo = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
-
- ivi->vf = HW_VF_ID_TO_OS(vf_id);
- memcpy(ivi->mac, vfinfo->vf_mac_addr, ETH_ALEN);
- ivi->vlan = vfinfo->pf_vlan;
- ivi->qos = vfinfo->pf_qos;
- ivi->spoofchk = vfinfo->spoofchk;
- ivi->trusted = vfinfo->trust;
- ivi->max_tx_rate = vfinfo->max_rate;
- ivi->min_tx_rate = vfinfo->min_rate;
-
- if (!vfinfo->link_forced)
- ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
- else if (vfinfo->link_up)
- ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
- else
- ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
-}
-
-int hinic_ndo_get_vf_config(struct net_device *netdev,
- int vf, struct ifla_vf_info *ivi)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_sriov_info *sriov_info;
-
- sriov_info = &nic_dev->sriov_info;
- if (vf >= sriov_info->num_vfs)
- return -EINVAL;
-
- hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi);
-
- return 0;
-}
-
-static int hinic_set_vf_mac(struct hinic_hwdev *hwdev, int vf,
- unsigned char *mac_addr)
-{
- struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
- struct vf_data_storage *vf_info;
- u16 func_id;
- int err;
-
- vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
-
- /* duplicate request, so just return success */
- if (vf_info->pf_set_mac &&
- !memcmp(vf_info->vf_mac_addr, mac_addr, ETH_ALEN))
- return 0;
-
- vf_info->pf_set_mac = true;
-
- func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf;
- err = hinic_update_mac(hwdev, vf_info->vf_mac_addr,
- mac_addr, 0, func_id);
- if (err) {
- vf_info->pf_set_mac = false;
- return err;
- }
-
- memcpy(vf_info->vf_mac_addr, mac_addr, ETH_ALEN);
-
- return 0;
-}
-
-int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_sriov_info *sriov_info;
- int err;
-
- sriov_info = &nic_dev->sriov_info;
- if (!is_valid_ether_addr(mac) || vf >= sriov_info->num_vfs)
- return -EINVAL;
-
- err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac);
- if (err)
- return err;
-
- netif_info(nic_dev, drv, netdev, "Setting MAC %pM on VF %d\n", mac, vf);
- netif_info(nic_dev, drv, netdev, "Reload the VF driver to make this change effective.");
-
- return 0;
-}
-
-static int hinic_add_vf_vlan(struct hinic_hwdev *hwdev, int vf_id,
- u16 vlan, u8 qos)
-{
- struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
- int err;
-
- err = hinic_set_vf_vlan(hwdev, true, vlan, qos, vf_id);
- if (err)
- return err;
-
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan;
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos;
-
- dev_info(&hwdev->hwif->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
- vlan, qos, HW_VF_ID_TO_OS(vf_id));
- return 0;
-}
-
-static int hinic_kill_vf_vlan(struct hinic_hwdev *hwdev, int vf_id)
-{
- struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
- int err;
-
- err = hinic_set_vf_vlan(hwdev, false,
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos,
- vf_id);
- if (err)
- return err;
-
- dev_info(&hwdev->hwif->pdev->dev, "Remove VLAN %d on VF %d\n",
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
- HW_VF_ID_TO_OS(vf_id));
-
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0;
- nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0;
-
- return 0;
-}
-
-static int hinic_update_mac_vlan(struct hinic_dev *nic_dev, u16 old_vlan,
- u16 new_vlan, int vf_id)
-{
- struct vf_data_storage *vf_info;
- u16 vlan_id;
- int err;
-
- if (!nic_dev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID)
- return -EINVAL;
-
- vf_info = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
- if (!vf_info->pf_set_mac)
- return 0;
-
- vlan_id = old_vlan;
- if (vlan_id)
- vlan_id |= HINIC_ADD_VLAN_IN_MAC;
-
- err = hinic_port_del_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
- if (err) {
- dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to delete VF %d MAC %pM vlan %d\n",
- HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, old_vlan);
- return err;
- }
-
- vlan_id = new_vlan;
- if (vlan_id)
- vlan_id |= HINIC_ADD_VLAN_IN_MAC;
-
- err = hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
- if (err) {
- dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to add VF %d MAC %pM vlan %d\n",
- HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, new_vlan);
- goto out;
- }
-
- return 0;
-
-out:
- vlan_id = old_vlan;
- if (vlan_id)
- vlan_id |= HINIC_ADD_VLAN_IN_MAC;
- hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
-
- return err;
-}
-
-static int set_hw_vf_vlan(struct hinic_dev *nic_dev,
- u16 cur_vlanprio, int vf, u16 vlan, u8 qos)
-{
- u16 old_vlan = cur_vlanprio & VLAN_VID_MASK;
- int err = 0;
-
- if (vlan || qos) {
- if (cur_vlanprio) {
- err = hinic_kill_vf_vlan(nic_dev->hwdev,
- OS_VF_ID_TO_HW(vf));
- if (err) {
- dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d old vlan %d\n",
- vf, old_vlan);
- goto out;
- }
- }
- err = hinic_add_vf_vlan(nic_dev->hwdev,
- OS_VF_ID_TO_HW(vf), vlan, qos);
- if (err) {
- dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to add vf %d new vlan %d\n",
- vf, vlan);
- goto out;
- }
- } else {
- err = hinic_kill_vf_vlan(nic_dev->hwdev, OS_VF_ID_TO_HW(vf));
- if (err) {
- dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d vlan %d\n",
- vf, old_vlan);
- goto out;
- }
- }
-
- err = hinic_update_mac_vlan(nic_dev, old_vlan, vlan,
- OS_VF_ID_TO_HW(vf));
-
-out:
- return err;
-}
-
-int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
- __be16 vlan_proto)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_sriov_info *sriov_info;
- u16 vlanprio, cur_vlanprio;
-
- sriov_info = &nic_dev->sriov_info;
- if (vf >= sriov_info->num_vfs || vlan >= VLAN_N_VID || qos > HINIC_MAX_QOS)
- return -EINVAL;
- if (vlan_proto != htons(ETH_P_8021Q))
- return -EPROTONOSUPPORT;
- vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT;
- cur_vlanprio = hinic_vf_info_vlanprio(nic_dev->hwdev,
- OS_VF_ID_TO_HW(vf));
- /* duplicate request, so just return success */
- if (vlanprio == cur_vlanprio)
- return 0;
-
- return set_hw_vf_vlan(nic_dev, cur_vlanprio, vf, vlan, qos);
-}
-
-static int hinic_set_vf_trust(struct hinic_hwdev *hwdev, u16 vf_id,
- bool trust)
-{
- struct vf_data_storage *vf_infos;
- struct hinic_func_to_io *nic_io;
-
- if (!hwdev)
- return -EINVAL;
-
- nic_io = &hwdev->func_to_io;
- vf_infos = nic_io->vf_infos;
- vf_infos[vf_id].trust = trust;
-
- return 0;
-}
-
-int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
-{
- struct hinic_dev *adapter = netdev_priv(netdev);
- struct hinic_sriov_info *sriov_info;
- struct hinic_func_to_io *nic_io;
- bool cur_trust;
- int err;
-
- sriov_info = &adapter->sriov_info;
- nic_io = &adapter->hwdev->func_to_io;
-
- if (vf >= sriov_info->num_vfs)
- return -EINVAL;
-
- cur_trust = nic_io->vf_infos[vf].trust;
- /* same request, so just return success */
- if (setting == cur_trust)
- return 0;
-
- err = hinic_set_vf_trust(adapter->hwdev, vf, setting);
- if (!err)
- dev_info(&sriov_info->pdev->dev, "Set VF %d trusted %s succeed\n",
- vf, setting ? "on" : "off");
- else
- dev_err(&sriov_info->pdev->dev, "Failed set VF %d trusted %s\n",
- vf, setting ? "on" : "off");
-
- return err;
-}
-
-int hinic_ndo_set_vf_bw(struct net_device *netdev,
- int vf, int min_tx_rate, int max_tx_rate)
-{
- static const u32 speeds[] = {
- SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
- SPEED_25000, SPEED_40000, SPEED_100000
- };
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_port_cap port_cap = { 0 };
- enum hinic_port_link_state link_state;
- int err;
-
- if (vf >= nic_dev->sriov_info.num_vfs) {
- netif_err(nic_dev, drv, netdev, "VF number must be less than %d\n",
- nic_dev->sriov_info.num_vfs);
- return -EINVAL;
- }
-
- err = hinic_port_link_state(nic_dev, &link_state);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Get link status failed when setting vf tx rate\n");
- return -EIO;
- }
-
- if (link_state == HINIC_LINK_STATE_DOWN) {
- netif_err(nic_dev, drv, netdev,
- "Link status must be up when setting vf tx rate\n");
- return -EPERM;
- }
-
- err = hinic_port_get_cap(nic_dev, &port_cap);
- if (err || port_cap.speed > LINK_SPEED_100GB)
- return -EIO;
+ err = hinic_get_port_info(adapter->hwdev, &port_info);
+ if (err || port_info.speed > LINK_SPEED_100GB)
+ return -EIO;
/* rate limit cannot be less than 0 and greater than link speed */
- if (max_tx_rate < 0 || max_tx_rate > speeds[port_cap.speed]) {
- netif_err(nic_dev, drv, netdev, "Max tx rate must be in [0 - %d]\n",
- speeds[port_cap.speed]);
+ if (max_tx_rate < 0 || max_tx_rate > speeds[port_info.speed]) {
+ nicif_err(adapter, drv, netdev, "Set vf max tx rate must be in [0 - %d]\n",
+ speeds[port_info.speed]);
return -EINVAL;
}
- err = hinic_set_vf_tx_rate(nic_dev->hwdev, OS_VF_ID_TO_HW(vf),
+ err = hinic_set_vf_tx_rate(adapter->hwdev, OS_VF_ID_TO_HW(vf),
max_tx_rate, min_tx_rate);
if (err) {
- netif_err(nic_dev, drv, netdev,
+ nicif_err(adapter, drv, netdev,
"Unable to set VF %d max rate %d min rate %d%s\n",
vf, max_tx_rate, min_tx_rate,
err == HINIC_TX_RATE_TABLE_FULL ?
@@ -906,459 +453,11 @@ int hinic_ndo_set_vf_bw(struct net_device *netdev,
return -EIO;
}
- netif_info(nic_dev, drv, netdev,
+ nicif_info(adapter, drv, netdev,
"Set VF %d max tx rate %d min tx rate %d successfully\n",
vf, max_tx_rate, min_tx_rate);
return 0;
}
-static int hinic_set_vf_spoofchk(struct hinic_hwdev *hwdev, u16 vf_id,
- bool spoofchk)
-{
- struct hinic_spoofchk_set spoofchk_cfg = {0};
- struct vf_data_storage *vf_infos = NULL;
- u16 out_size = sizeof(spoofchk_cfg);
- int err;
-
- if (!hwdev)
- return -EINVAL;
-
- vf_infos = hwdev->func_to_io.vf_infos;
-
- spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
- spoofchk_cfg.state = spoofchk ? 1 : 0;
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ENABLE_SPOOFCHK,
- &spoofchk_cfg, sizeof(spoofchk_cfg),
- &spoofchk_cfg, &out_size);
- if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
- err = HINIC_MGMT_CMD_UNSUPPORTED;
- } else if (err || !out_size || spoofchk_cfg.status) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n",
- HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status,
- out_size);
- err = -EIO;
- }
-
- vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk;
-
- return err;
-}
-
-int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_sriov_info *sriov_info;
- bool cur_spoofchk;
- int err;
-
- sriov_info = &nic_dev->sriov_info;
- if (vf >= sriov_info->num_vfs)
- return -EINVAL;
-
- cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
-
- /* same request, so just return success */
- if (setting == cur_spoofchk)
- return 0;
-
- err = hinic_set_vf_spoofchk(sriov_info->hwdev,
- OS_VF_ID_TO_HW(vf), setting);
- if (!err) {
- netif_info(nic_dev, drv, netdev, "Set VF %d spoofchk %s successfully\n",
- vf, setting ? "on" : "off");
- } else if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
- netif_err(nic_dev, drv, netdev,
- "Current firmware doesn't support to set vf spoofchk, need to upgrade latest firmware version\n");
- err = -EOPNOTSUPP;
- }
-
- return err;
-}
-
-static int hinic_set_vf_link_state(struct hinic_hwdev *hwdev, u16 vf_id,
- int link)
-{
- struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
- struct vf_data_storage *vf_infos = nic_io->vf_infos;
- u8 link_status = 0;
-
- switch (link) {
- case HINIC_IFLA_VF_LINK_STATE_AUTO:
- vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false;
- vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ?
- true : false;
- link_status = nic_io->link_status;
- break;
- case HINIC_IFLA_VF_LINK_STATE_ENABLE:
- vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
- vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true;
- link_status = HINIC_LINK_UP;
- break;
- case HINIC_IFLA_VF_LINK_STATE_DISABLE:
- vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
- vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false;
- link_status = HINIC_LINK_DOWN;
- break;
- default:
- return -EINVAL;
- }
-
- /* Notify the VF of its new link state */
- hinic_notify_vf_link_status(hwdev, vf_id, link_status);
-
- return 0;
-}
-
-int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_sriov_info *sriov_info;
-
- sriov_info = &nic_dev->sriov_info;
-
- if (vf_id >= sriov_info->num_vfs) {
- netif_err(nic_dev, drv, netdev,
- "Invalid VF Identifier %d\n", vf_id);
- return -EINVAL;
- }
-
- return hinic_set_vf_link_state(sriov_info->hwdev,
- OS_VF_ID_TO_HW(vf_id), link);
-}
-
-/* pf receive message from vf */
-static int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size)
-{
- u8 size = ARRAY_SIZE(nic_cmd_support_vf);
- struct vf_cmd_msg_handle *vf_msg_handle;
- struct hinic_hwdev *dev = hwdev;
- struct hinic_func_to_io *nic_io;
- struct hinic_pfhwdev *pfhwdev;
- int err = 0;
- u32 i;
-
- if (!hwdev)
- return -EINVAL;
-
- if (!hinic_mbox_check_cmd_valid(hwdev, nic_cmd_support_vf, vf_id, cmd,
- buf_in, in_size, size)) {
- dev_err(&dev->hwif->pdev->dev,
- "PF Receive VF nic cmd: 0x%x, mbox len: 0x%x is invalid\n",
- cmd, in_size);
- return HINIC_MBOX_VF_CMD_ERROR;
- }
-
- pfhwdev = container_of(dev, struct hinic_pfhwdev, hwdev);
- nic_io = &dev->func_to_io;
- for (i = 0; i < ARRAY_SIZE(nic_vf_cmd_msg_handler); i++) {
- vf_msg_handle = &nic_vf_cmd_msg_handler[i];
- if (cmd == vf_msg_handle->cmd &&
- vf_msg_handle->cmd_msg_handler) {
- err = vf_msg_handle->cmd_msg_handler(hwdev, vf_id,
- buf_in, in_size,
- buf_out,
- out_size);
- break;
- }
- }
- if (i == ARRAY_SIZE(nic_vf_cmd_msg_handler))
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
- cmd, buf_in, in_size, buf_out,
- out_size, HINIC_MGMT_MSG_SYNC);
-
- if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
- dev_err(&nic_io->hwif->pdev->dev, "PF receive VF L2NIC cmd: %d process error, err:%d\n",
- cmd, err);
- return err;
-}
-
-static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size)
-{
- struct hinic_dev_cap *dev_cap = buf_out;
- struct hinic_hwdev *dev = hwdev;
- struct hinic_cap *cap;
-
- cap = &dev->nic_cap;
- memset(dev_cap, 0, sizeof(*dev_cap));
-
- dev_cap->max_vf = cap->max_vf;
- dev_cap->max_sqs = cap->max_vf_qps;
- dev_cap->max_rqs = cap->max_vf_qps;
- dev_cap->port_id = dev->port_id;
-
- *out_size = sizeof(*dev_cap);
-
- return 0;
-}
-
-static int hinic_init_vf_infos(struct hinic_func_to_io *nic_io, u16 vf_id)
-{
- struct vf_data_storage *vf_infos = nic_io->vf_infos;
-
- if (set_vf_link_state > HINIC_IFLA_VF_LINK_STATE_DISABLE) {
- dev_warn(&nic_io->hwif->pdev->dev, "Module Parameter set_vf_link_state value %d is out of range, resetting to %d\n",
- set_vf_link_state, HINIC_IFLA_VF_LINK_STATE_AUTO);
- set_vf_link_state = HINIC_IFLA_VF_LINK_STATE_AUTO;
- }
-
- switch (set_vf_link_state) {
- case HINIC_IFLA_VF_LINK_STATE_AUTO:
- vf_infos[vf_id].link_forced = false;
- break;
- case HINIC_IFLA_VF_LINK_STATE_ENABLE:
- vf_infos[vf_id].link_forced = true;
- vf_infos[vf_id].link_up = true;
- break;
- case HINIC_IFLA_VF_LINK_STATE_DISABLE:
- vf_infos[vf_id].link_forced = true;
- vf_infos[vf_id].link_up = false;
- break;
- default:
- dev_err(&nic_io->hwif->pdev->dev, "Invalid input parameter set_vf_link_state: %d\n",
- set_vf_link_state);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
-{
- struct vf_data_storage *vf_infos;
-
- vf_infos = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
- if (vf_infos->pf_set_mac)
- hinic_port_del_mac(nic_dev, vf_infos->vf_mac_addr, 0);
-
- if (hinic_vf_info_vlanprio(nic_dev->hwdev, vf_id))
- hinic_kill_vf_vlan(nic_dev->hwdev, vf_id);
-
- if (vf_infos->max_rate)
- hinic_set_vf_tx_rate(nic_dev->hwdev, vf_id, 0, 0);
-
- if (vf_infos->spoofchk)
- hinic_set_vf_spoofchk(nic_dev->hwdev, vf_id, false);
-
- if (vf_infos->trust)
- hinic_set_vf_trust(nic_dev->hwdev, vf_id, false);
-
- memset(vf_infos, 0, sizeof(*vf_infos));
- /* set vf_infos to default */
- hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id));
-}
-
-static void hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
- u16 start_vf_id, u16 end_vf_id)
-{
- struct hinic_dev *nic_dev;
- u16 func_idx, idx;
-
- nic_dev = container_of(sriov_info, struct hinic_dev, sriov_info);
-
- for (idx = start_vf_id; idx <= end_vf_id; idx++) {
- func_idx = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + idx;
- hinic_set_wq_page_size(nic_dev->hwdev, func_idx,
- HINIC_HW_WQ_PAGE_SIZE);
- hinic_clear_vf_infos(nic_dev, idx);
- }
-}
-
-int hinic_vf_func_init(struct hinic_hwdev *hwdev)
-{
- struct hinic_register_vf register_info = {0};
- u16 out_size = sizeof(register_info);
- struct hinic_func_to_io *nic_io;
- int err = 0;
- u32 size, i;
-
- err = hinic_vf_mbox_random_id_init(hwdev);
- if (err) {
- dev_err(&hwdev->hwif->pdev->dev, "Failed to init vf mbox random id, err: %d\n",
- err);
- return err;
- }
-
- nic_io = &hwdev->func_to_io;
-
- if (HINIC_IS_VF(hwdev->hwif)) {
- err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
- HINIC_PORT_CMD_VF_REGISTER,
-				       &register_info, sizeof(register_info),
-				       &register_info, &out_size, 0);
- if (err || register_info.status || !out_size) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
- err, register_info.status, out_size);
- return -EIO;
- }
- } else {
- err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_CFGM,
- cfg_mbx_pf_proc_vf_msg);
- if (err) {
- dev_err(&hwdev->hwif->pdev->dev,
- "Register PF mailbox callback failed\n");
- return err;
- }
- nic_io->max_vfs = hwdev->nic_cap.max_vf;
- size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs;
- if (size != 0) {
- nic_io->vf_infos = kzalloc(size, GFP_KERNEL);
- if (!nic_io->vf_infos) {
- err = -ENOMEM;
- goto out_free_nic_io;
- }
-
- for (i = 0; i < nic_io->max_vfs; i++) {
- err = hinic_init_vf_infos(nic_io, i);
- if (err)
- goto err_init_vf_infos;
- }
-
- err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
- nic_pf_mbox_handler);
- if (err)
- goto err_register_pf_mbox_cb;
- }
- }
-
- return 0;
-
-err_register_pf_mbox_cb:
-err_init_vf_infos:
- kfree(nic_io->vf_infos);
-out_free_nic_io:
- return err;
-}
-
-void hinic_vf_func_free(struct hinic_hwdev *hwdev)
-{
- struct hinic_register_vf unregister = {0};
- u16 out_size = sizeof(unregister);
- int err;
-
- if (HINIC_IS_VF(hwdev->hwif)) {
- err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
- HINIC_PORT_CMD_VF_UNREGISTER,
- &unregister, sizeof(unregister),
- &unregister, &out_size, 0);
- if (err || !out_size || unregister.status)
- dev_err(&hwdev->hwif->pdev->dev, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n",
- err, unregister.status, out_size);
- } else {
- if (hwdev->func_to_io.vf_infos) {
- hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
- kfree(hwdev->func_to_io.vf_infos);
- }
- }
-}
-
-static int hinic_init_vf_hw(struct hinic_hwdev *hwdev, u16 start_vf_id,
- u16 end_vf_id)
-{
- u16 i, func_idx;
- int err;
-
- /* vf use 256K as default wq page size, and can't change it */
- for (i = start_vf_id; i <= end_vf_id; i++) {
- func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + i;
- err = hinic_set_wq_page_size(hwdev, func_idx,
- HINIC_DEFAULT_WQ_PAGE_SIZE);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-int hinic_pci_sriov_disable(struct pci_dev *pdev)
-{
- struct hinic_sriov_info *sriov_info;
- u16 tmp_vfs;
-
- sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
- /* if SR-IOV is already disabled then nothing will be done */
- if (!sriov_info->sriov_enabled)
- return 0;
-
- set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
-
- /* If our VFs are assigned we cannot shut down SR-IOV
- * without causing issues, so just leave the hardware
- * available but disabled
- */
- if (pci_vfs_assigned(sriov_info->pdev)) {
- clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
- dev_warn(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
- return -EPERM;
- }
- sriov_info->sriov_enabled = false;
-
- /* disable iov and allow time for transactions to clear */
- pci_disable_sriov(sriov_info->pdev);
-
- tmp_vfs = (u16)sriov_info->num_vfs;
- sriov_info->num_vfs = 0;
- hinic_deinit_vf_hw(sriov_info, OS_VF_ID_TO_HW(0),
- OS_VF_ID_TO_HW(tmp_vfs - 1));
-
- clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
-
- return 0;
-}
-
-static int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
-{
- struct hinic_sriov_info *sriov_info;
- int err;
-
- sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
-
- if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) {
- dev_err(&pdev->dev,
- "SR-IOV enable in process, please wait, num_vfs %d\n",
- num_vfs);
- return -EPERM;
- }
-
- err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
- OS_VF_ID_TO_HW((u16)num_vfs - 1));
- if (err) {
- dev_err(&sriov_info->pdev->dev,
- "Failed to init vf in hardware before enable sriov, error %d\n",
- err);
- clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
- return err;
- }
-
- err = pci_enable_sriov(sriov_info->pdev, num_vfs);
- if (err) {
- dev_err(&pdev->dev,
- "Failed to enable SR-IOV, error %d\n", err);
- clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
- return err;
- }
-
- sriov_info->sriov_enabled = true;
- sriov_info->num_vfs = num_vfs;
- clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
-
- return num_vfs;
-}
-
-int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
-{
- struct hinic_sriov_info *sriov_info;
-
- sriov_info = hinic_get_sriov_info_by_pcidev(dev);
-
- if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state))
- return -EBUSY;
-
- if (!num_vfs)
- return hinic_pci_sriov_disable(dev);
- else
- return hinic_pci_sriov_enable(dev, num_vfs);
-}
+/*lint -restore*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
index d4d4e63d31ea..67b3174f09f0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -1,107 +1,51 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0*/
/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
*/
#ifndef HINIC_SRIOV_H
#define HINIC_SRIOV_H
-#include "hinic_hw_dev.h"
-
-#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)
-#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)
-
enum hinic_sriov_state {
HINIC_SRIOV_DISABLE,
HINIC_SRIOV_ENABLE,
HINIC_FUNC_REMOVE,
};
-enum {
- HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
- HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */
- HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */
-};
-
struct hinic_sriov_info {
struct pci_dev *pdev;
- struct hinic_hwdev *hwdev;
+ void *hwdev;
bool sriov_enabled;
unsigned int num_vfs;
unsigned long state;
};
-struct vf_data_storage {
- u8 vf_mac_addr[ETH_ALEN];
- bool registered;
- bool pf_set_mac;
- u16 pf_vlan;
- u8 pf_qos;
- u32 max_rate;
- u32 min_rate;
-
- bool link_forced;
- bool link_up; /* only valid if VF link is forced */
- bool spoofchk;
- bool trust;
-};
-
-struct hinic_register_vf {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-};
-
-struct hinic_port_mac_update {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 vlan_id;
- u16 rsvd1;
- u8 old_mac[ETH_ALEN];
- u16 rsvd2;
- u8 new_mac[ETH_ALEN];
-};
-
-struct hinic_vf_vlan_config {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_id;
- u16 vlan_id;
- u8 qos;
- u8 rsvd1[7];
-};
-
+int hinic_pci_sriov_disable(struct pci_dev *dev);
+int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
-
int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto);
-int hinic_ndo_get_vf_config(struct net_device *netdev,
- int vf, struct ifla_vf_info *ivi);
-
-int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
-
-int hinic_ndo_set_vf_bw(struct net_device *netdev,
- int vf, int min_tx_rate, int max_tx_rate);
+int hinic_ndo_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi);
int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
-int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
-
-void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
- u8 link_status);
-
-int hinic_pci_sriov_disable(struct pci_dev *dev);
-
-int hinic_vf_func_init(struct hinic_hwdev *hwdev);
-
-void hinic_vf_func_free(struct hinic_hwdev *hwdev);
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
-int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index ad47ac51a139..7eb85bfcf190 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -1,23 +1,26 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Huawei HiNIC PCI Express Linux driver
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
*/
-#include <linux/if_vlan.h>
-#include <linux/kernel.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
+
#include <linux/netdevice.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/skbuff.h>
-#include <linux/smp.h>
-#include <asm/byteorder.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pci.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
@@ -25,220 +28,289 @@
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_dev.h"
-#include "hinic_dev.h"
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_nic_io.h"
+#include "hinic_nic_dev.h"
+#include "hinic_qp.h"
#include "hinic_tx.h"
+#include "hinic_dbg.h"
-#define TX_IRQ_NO_PENDING 0
-#define TX_IRQ_NO_COALESC 0
-#define TX_IRQ_NO_LLI_TIMER 0
-#define TX_IRQ_NO_CREDIT 0
-#define TX_IRQ_NO_RESEND_TIMER 0
-
-#define CI_UPDATE_NO_PENDING 0
-#define CI_UPDATE_NO_COALESC 0
-
-#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
-
-#define MIN_SKB_LEN 32
-
-#define MAX_PAYLOAD_OFFSET 221
-#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
-
-union hinic_l3 {
- struct iphdr *v4;
- struct ipv6hdr *v6;
- unsigned char *hdr;
-};
-
-union hinic_l4 {
- struct tcphdr *tcp;
- struct udphdr *udp;
- unsigned char *hdr;
-};
+#define MIN_SKB_LEN 32
+#define MAX_PAYLOAD_OFFSET 221
-enum hinic_offload_type {
- TX_OFFLOAD_TSO = BIT(0),
- TX_OFFLOAD_CSUM = BIT(1),
- TX_OFFLOAD_VLAN = BIT(2),
- TX_OFFLOAD_INVALID = BIT(3),
-};
+#define NIC_QID(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1))
-/**
- * hinic_txq_clean_stats - Clean the statistics of specific queue
- * @txq: Logical Tx Queue
- **/
-static void hinic_txq_clean_stats(struct hinic_txq *txq)
-{
- struct hinic_txq_stats *txq_stats = &txq->txq_stats;
-
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->pkts = 0;
- txq_stats->bytes = 0;
- txq_stats->tx_busy = 0;
- txq_stats->tx_wake = 0;
- txq_stats->tx_dropped = 0;
- txq_stats->big_frags_pkts = 0;
- u64_stats_update_end(&txq_stats->syncp);
+#define TXQ_STATS_INC(txq, field) \
+{ \
+ u64_stats_update_begin(&(txq)->txq_stats.syncp); \
+ (txq)->txq_stats.field++; \
+ u64_stats_update_end(&(txq)->txq_stats.syncp); \
}
-/**
- * hinic_txq_get_stats - get statistics of Tx Queue
- * @txq: Logical Tx Queue
- * @stats: return updated stats here
- **/
-void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
+void hinic_txq_get_stats(struct hinic_txq *txq,
+ struct hinic_txq_stats *stats)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
unsigned int start;
+ u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&txq_stats->syncp);
- stats->pkts = txq_stats->pkts;
- stats->bytes = txq_stats->bytes;
- stats->tx_busy = txq_stats->tx_busy;
- stats->tx_wake = txq_stats->tx_wake;
- stats->tx_dropped = txq_stats->tx_dropped;
+ stats->bytes = txq_stats->bytes;
+ stats->packets = txq_stats->packets;
+ stats->busy = txq_stats->busy;
+ stats->wake = txq_stats->wake;
+ stats->dropped = txq_stats->dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
+ stats->big_udp_pkts = txq_stats->big_udp_pkts;
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ u64_stats_update_end(&stats->syncp);
+}
+
+void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats)
+{
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->bytes = 0;
+ txq_stats->packets = 0;
+ txq_stats->busy = 0;
+ txq_stats->wake = 0;
+ txq_stats->dropped = 0;
+ txq_stats->big_frags_pkts = 0;
+ txq_stats->big_udp_pkts = 0;
+
+ txq_stats->ufo_pkt_unsupport = 0;
+ txq_stats->ufo_linearize_err = 0;
+ txq_stats->ufo_alloc_skb_err = 0;
+ txq_stats->skb_pad_err = 0;
+ txq_stats->frag_len_overflow = 0;
+ txq_stats->offload_cow_skb_err = 0;
+ txq_stats->alloc_cpy_frag_err = 0;
+ txq_stats->map_cpy_frag_err = 0;
+ txq_stats->map_frag_err = 0;
+ txq_stats->frag_size_err = 0;
+ txq_stats->unknown_tunnel_pkt = 0;
+ u64_stats_update_end(&txq_stats->syncp);
}
-/**
- * txq_stats_init - Initialize the statistics of specific queue
- * @txq: Logical Tx Queue
- **/
static void txq_stats_init(struct hinic_txq *txq)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
u64_stats_init(&txq_stats->syncp);
- hinic_txq_clean_stats(txq);
+ hinic_txq_clean_stats(txq_stats);
}
-/**
- * tx_map_skb - dma mapping for skb and return sges
- * @nic_dev: nic device
- * @skb: the skb
- * @sges: returned sges
- *
- * Return 0 - Success, negative - Failure
- **/
-static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
- struct hinic_sge *sges)
+inline void hinic_set_buf_desc(struct hinic_sq_bufdesc *buf_descs,
+ dma_addr_t addr, u32 len)
+{
+ buf_descs->hi_addr = cpu_to_be32(upper_32_bits(addr));
+ buf_descs->lo_addr = cpu_to_be32(lower_32_bits(addr));
+ buf_descs->len = cpu_to_be32(len);
+}
+
+static int tx_map_skb(struct hinic_nic_dev *nic_dev, struct sk_buff *skb,
+ struct hinic_txq *txq, struct hinic_tx_info *tx_info,
+ struct hinic_sq_bufdesc *buf_descs, u16 skb_nr_frags)
{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- skb_frag_t *frag;
- dma_addr_t dma_addr;
- int i, j;
-
- dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, dma_addr)) {
- dev_err(&pdev->dev, "Failed to map Tx skb data\n");
- return -EFAULT;
- }
-
- hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));
-
- for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
-
- dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, dma_addr)) {
- dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
- goto err_tx_map;
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic_dma_len *dma_len = tx_info->dma_len;
+ skb_frag_t *frag = NULL;
+ u16 base_nr_frags;
+ int j, i = 0;
+ int node, err = 0;
+ u32 nsize, cpy_nsize = 0;
+ u8 *vaddr, *cpy_buff = NULL;
+
+ if (unlikely(skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE)) {
+ for (i = HINIC_MAX_SKB_NR_FRAGE; i <= skb_nr_frags; i++)
+ cpy_nsize +=
+ skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
+ if (!cpy_nsize) {
+ TXQ_STATS_INC(txq, alloc_cpy_frag_err);
+ return -EINVAL;
+ }
+
+ node = dev_to_node(&nic_dev->pdev->dev);
+ if (node == NUMA_NO_NODE)
+ cpy_buff = kzalloc(cpy_nsize,
+ GFP_ATOMIC | __GFP_NOWARN);
+ else
+ cpy_buff = kzalloc_node(cpy_nsize,
+ GFP_ATOMIC | __GFP_NOWARN,
+ node);
+
+ if (!cpy_buff) {
+ TXQ_STATS_INC(txq, alloc_cpy_frag_err);
+ return -ENOMEM;
}
- hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
+ tx_info->cpy_buff = cpy_buff;
+
+ for (i = HINIC_MAX_SKB_NR_FRAGE; i <= skb_nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ nsize = skb_frag_size(frag);
+
+ vaddr = kmap_local_page(skb_frag_page(frag));
+ memcpy(cpy_buff, vaddr + frag->bv_offset, nsize);
+ kunmap_local(vaddr);
+ cpy_buff += nsize;
+ }
}
- return 0;
+ dma_len[0].dma = dma_map_single(&pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_len[0].dma)) {
+ TXQ_STATS_INC(txq, map_frag_err);
+ err = -EFAULT;
+ goto map_single_err;
+ }
+ dma_len[0].len = skb_headlen(skb);
+ hinic_set_buf_desc(&buf_descs[0], dma_len[0].dma,
+ dma_len[0].len);
-err_tx_map:
- for (j = 0; j < i; j++)
- dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
- sges[j + 1].len, DMA_TO_DEVICE);
+ if (skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE)
+ base_nr_frags = HINIC_MAX_SKB_NR_FRAGE - 1;
+ else
+ base_nr_frags = skb_nr_frags;
+
+ for (i = 0; i < base_nr_frags; ) {
+ frag = &(skb_shinfo(skb)->frags[i]);
+ nsize = skb_frag_size(frag);
+ i++;
+ dma_len[i].dma = skb_frag_dma_map(&pdev->dev, frag, 0,
+ nsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_len[i].dma)) {
+ TXQ_STATS_INC(txq, map_frag_err);
+ i--;
+ err = -EFAULT;
+ goto frag_map_err;
+ }
+ dma_len[i].len = nsize;
+
+ hinic_set_buf_desc(&buf_descs[i], dma_len[i].dma,
+ dma_len[i].len);
+ }
+
+ if (skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE) {
+ dma_len[HINIC_MAX_SKB_NR_FRAGE].dma =
+ dma_map_single(&pdev->dev, tx_info->cpy_buff,
+ cpy_nsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ dma_len[HINIC_MAX_SKB_NR_FRAGE].dma)) {
+ TXQ_STATS_INC(txq, map_cpy_frag_err);
+ err = -EFAULT;
+ goto fusion_map_err;
+ }
- dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
+ dma_len[HINIC_MAX_SKB_NR_FRAGE].len = cpy_nsize;
+ hinic_set_buf_desc(&buf_descs[HINIC_MAX_SKB_NR_FRAGE],
+ dma_len[HINIC_MAX_SKB_NR_FRAGE].dma,
+ dma_len[HINIC_MAX_SKB_NR_FRAGE].len);
+ }
+
+ return 0;
+
+fusion_map_err:
+frag_map_err:
+ for (j = 0; j < i;) {
+ j++;
+ dma_unmap_page(&pdev->dev, dma_len[j].dma,
+ dma_len[j].len, DMA_TO_DEVICE);
+ }
+ dma_unmap_single(&pdev->dev, dma_len[0].dma, dma_len[0].len,
DMA_TO_DEVICE);
- return -EFAULT;
+
+map_single_err:
+ kfree(tx_info->cpy_buff);
+ tx_info->cpy_buff = NULL;
+
+ return err;
}
-/**
- * tx_unmap_skb - unmap the dma address of the skb
- * @nic_dev: nic device
- * @skb: the skb
- * @sges: the sges that are connected to the skb
- **/
-static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
- struct hinic_sge *sges)
+static inline void tx_unmap_skb(struct hinic_nic_dev *nic_dev,
+ struct sk_buff *skb,
+ struct hinic_dma_len *dma_len,
+ u16 valid_nr_frags)
{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
+ struct pci_dev *pdev = nic_dev->pdev;
int i;
+ u16 nr_frags = valid_nr_frags;
- for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)
- dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
- sges[i + 1].len, DMA_TO_DEVICE);
+ if (nr_frags > HINIC_MAX_SKB_NR_FRAGE)
+ nr_frags = HINIC_MAX_SKB_NR_FRAGE;
- dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
- DMA_TO_DEVICE);
+ for (i = 0; i < nr_frags; ) {
+ i++;
+ dma_unmap_page(&pdev->dev,
+ dma_len[i].dma,
+ dma_len[i].len, DMA_TO_DEVICE);
+ }
+
+ dma_unmap_single(&pdev->dev, dma_len[0].dma,
+ dma_len[0].len, DMA_TO_DEVICE);
}
-static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
+union hinic_ip {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union hinic_l4 {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+};
+
+#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
+
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_ip *ip,
union hinic_l4 *l4,
- enum hinic_offload_type offload_type,
- enum hinic_l3_offload_type *l3_type,
- u8 *l4_proto)
+ enum tx_offload_type offload_type,
+ enum sq_l3_type *l3_type, u8 *l4_proto)
{
- u8 *exthdr;
+ unsigned char *exthdr;
if (ip->v4->version == 4) {
*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
- IPV4_PKT_NO_CHKSUM_OFFLOAD :
- IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ IPV4_PKT_NO_CHKSUM_OFFLOAD : IPV4_PKT_WITH_CHKSUM_OFFLOAD;
*l4_proto = ip->v4->protocol;
} else if (ip->v4->version == 6) {
*l3_type = IPV6_PKT;
exthdr = ip->hdr + sizeof(*ip->v6);
*l4_proto = ip->v6->nexthdr;
if (exthdr != l4->hdr) {
- int start = exthdr - skb->data;
- __be16 frag_off;
+ __be16 frag_off = 0;
- ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
+ ipv6_skip_exthdr(skb, (int)(exthdr - skb->data),
+ l4_proto, &frag_off);
}
} else {
- *l3_type = L3TYPE_UNKNOWN;
+ *l3_type = UNKNOWN_L3TYPE;
*l4_proto = 0;
}
}
static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
- enum hinic_offload_type offload_type, u8 l4_proto,
- enum hinic_l4_offload_type *l4_offload,
+ enum tx_offload_type offload_type, u8 l4_proto,
+ enum sq_l4offload_type *l4_offload,
u32 *l4_len, u32 *offset)
{
- *l4_offload = OFFLOAD_DISABLE;
*offset = 0;
*l4_len = 0;
+ *l4_offload = OFFLOAD_DISABLE;
switch (l4_proto) {
case IPPROTO_TCP:
*l4_offload = TCP_OFFLOAD_ENABLE;
- /* doff in unit of 4B */
- *l4_len = l4->tcp->doff * 4;
+ *l4_len = l4->tcp->doff * 4; /* doff in unit of 4B */
+ /* To keep same with TSO, payload offset begins from paylaod */
*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
break;
@@ -255,6 +327,9 @@ static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
*l4_offload = SCTP_OFFLOAD_ENABLE;
*l4_len = sizeof(struct sctphdr);
+ /* To keep same with UFO, payload offset
+ * begins from L4 header
+ */
*offset = TRANSPORT_OFFSET(l4->hdr, skb);
break;
@@ -263,102 +338,15 @@ static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
}
}
-static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
-{
- return (ip->v4->version == 4) ?
- csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
- csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
-}
-
-static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
- struct sk_buff *skb)
+static int hinic_tx_csum(struct hinic_txq *txq, struct hinic_sq_task *task,
+ u32 *queue_info, struct sk_buff *skb)
{
- u32 offset, l4_len, ip_identify, network_hdr_len;
- enum hinic_l3_offload_type l3_offload;
- enum hinic_l4_offload_type l4_offload;
- union hinic_l3 ip;
- union hinic_l4 l4;
- u8 l4_proto;
-
- if (!skb_is_gso(skb))
- return 0;
-
- if (skb_cow_head(skb, 0) < 0)
- return -EPROTONOSUPPORT;
-
- if (skb->encapsulation) {
- u32 gso_type = skb_shinfo(skb)->gso_type;
- u32 tunnel_type = 0;
- u32 l4_tunnel_len;
-
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
- network_hdr_len = skb_inner_network_header_len(skb);
-
- if (ip.v4->version == 4) {
- ip.v4->tot_len = 0;
- l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
- } else if (ip.v4->version == 6) {
- l3_offload = IPV6_PKT;
- } else {
- l3_offload = 0;
- }
-
- hinic_task_set_outter_l3(task, l3_offload,
- skb_network_header_len(skb));
-
- if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
- l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
- tunnel_type = TUNNEL_UDP_CSUM;
- } else if (gso_type & SKB_GSO_UDP_TUNNEL) {
- tunnel_type = TUNNEL_UDP_NO_CSUM;
- }
-
- l4_tunnel_len = skb_inner_network_offset(skb) -
- skb_transport_offset(skb);
- hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
-
- ip.hdr = skb_inner_network_header(skb);
- l4.hdr = skb_inner_transport_header(skb);
- } else {
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
- network_hdr_len = skb_network_header_len(skb);
- }
-
- /* initialize inner IP header fields */
- if (ip.v4->version == 4)
- ip.v4->tot_len = 0;
- else
- ip.v6->payload_len = 0;
-
- get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
- &l4_proto);
-
- hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);
-
- ip_identify = 0;
- if (l4_proto == IPPROTO_TCP)
- l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
-
- get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
- &l4_len, &offset);
-
- hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
- ip_identify, skb_shinfo(skb)->gso_size);
-
- return 1;
-}
-
-static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
- struct sk_buff *skb)
-{
- enum hinic_l4_offload_type l4_offload;
- u32 offset, l4_len, network_hdr_len;
- enum hinic_l3_offload_type l3_type;
- u32 tunnel_type = NOT_TUNNEL;
- union hinic_l3 ip;
+ union hinic_ip ip;
union hinic_l4 l4;
+ enum sq_l3_type l3_type;
+ enum sq_l4offload_type l4_offload;
+ u32 network_hdr_len;
+ u32 offset, l4_len;
u8 l4_proto;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -366,8 +354,8 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
if (skb->encapsulation) {
u32 l4_tunnel_len;
+ u32 tunnel_type = TUNNEL_UDP_NO_CSUM;
- tunnel_type = TUNNEL_UDP_NO_CSUM;
ip.hdr = skb_network_header(skb);
if (ip.v4->version == 4) {
@@ -378,7 +366,6 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
__be16 frag_off;
l3_type = IPV6_PKT;
- tunnel_type = TUNNEL_UDP_CSUM;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
l4.hdr = skb_transport_header(skb);
@@ -386,7 +373,7 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
ipv6_skip_exthdr(skb, exthdr - skb->data,
&l4_proto, &frag_off);
} else {
- l3_type = L3TYPE_UNKNOWN;
+ l3_type = UNKNOWN_L3TYPE;
l4_proto = IPPROTO_RAW;
}
@@ -411,7 +398,8 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
network_hdr_len = skb_network_header_len(skb);
break;
default:
- /* Unsupported tunnel packet, disable csum offload */
+ TXQ_STATS_INC(txq, unknown_tunnel_pkt);
+ /* Unsupported tunnel packet, disable csum offload */
skb_checksum_help(skb);
return 0;
}
@@ -422,513 +410,737 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
l4.hdr = skb_transport_header(skb);
network_hdr_len = skb_network_header_len(skb);
}
+ get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM,
+ &l3_type, &l4_proto);
- get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
- &l4_proto);
+ get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto,
+ &l4_offload, &l4_len, &offset);
hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
- get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
- &l4_len, &offset);
-
hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);
return 1;
}
-static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
- u16 vlan_tag, u16 vlan_pri)
+static __sum16 csum_magic(union hinic_ip *ip, unsigned short proto)
{
- task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
- HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);
+ return (ip->v4->version == 4) ?
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+ csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
- *queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
+static int hinic_tso(struct hinic_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ union hinic_ip ip;
+ union hinic_l4 l4;
+ enum sq_l3_type l3_type;
+ enum sq_l4offload_type l4_offload;
+ u32 network_hdr_len;
+ u32 offset, l4_len;
+ u32 ip_identify = 0;
+ u8 l4_proto;
+ int err;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ if (skb->encapsulation) {
+ u32 l4_tunnel_len;
+ u32 tunnel_type = 0;
+ u32 gso_type = skb_shinfo(skb)->gso_type;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+
+ if (ip.v4->version == 4)
+ l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ else if (ip.v4->version == 6)
+ l3_type = IPV6_PKT;
+ else
+ l3_type = 0;
+
+ hinic_task_set_outter_l3(task, l3_type,
+ skb_network_header_len(skb));
+
+ if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ tunnel_type = TUNNEL_UDP_CSUM;
+ } else if (gso_type & SKB_GSO_UDP_TUNNEL) {
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
+ }
+
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ }
+
+ get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO,
+ &l3_type, &l4_proto);
+
+ if (l4_proto == IPPROTO_TCP)
+ l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+ get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto,
+ &l4_offload, &l4_len, &offset);
+
+ hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
+
+ hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len,
+ offset, ip_identify, skb_shinfo(skb)->gso_size);
+
+ return 1;
}
-static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
- u32 *queue_info)
+static enum tx_offload_type hinic_tx_offload(struct hinic_txq *txq,
+ struct sk_buff *skb,
+ struct hinic_sq_task *task,
+ u32 *queue_info, u8 avd_flag)
{
- enum hinic_offload_type offload = 0;
+ enum tx_offload_type offload = 0;
+ int tso_cs_en;
u16 vlan_tag;
- int enabled;
- enabled = offload_tso(task, queue_info, skb);
- if (enabled > 0) {
+ task->pkt_info0 = 0;
+ task->pkt_info1 = 0;
+ task->pkt_info2 = 0;
+
+ tso_cs_en = hinic_tso(task, queue_info, skb);
+ if (tso_cs_en < 0) {
+ offload = TX_OFFLOAD_INVALID;
+ return offload;
+ } else if (tso_cs_en) {
offload |= TX_OFFLOAD_TSO;
- } else if (enabled == 0) {
- enabled = offload_csum(task, queue_info, skb);
- if (enabled)
- offload |= TX_OFFLOAD_CSUM;
} else {
- return -EPROTONOSUPPORT;
+ tso_cs_en = hinic_tx_csum(txq, task, queue_info, skb);
+ if (tso_cs_en)
+ offload |= TX_OFFLOAD_CSUM;
}
if (unlikely(skb_vlan_tag_present(skb))) {
vlan_tag = skb_vlan_tag_get(skb);
- offload_vlan(task, queue_info, vlan_tag,
- vlan_tag >> VLAN_PRIO_SHIFT);
+ hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
+ vlan_tag >> VLAN_PRIO_SHIFT);
offload |= TX_OFFLOAD_VLAN;
}
- if (offload)
- hinic_task_set_l2hdr(task, skb_network_offset(skb));
-
- /* payload offset should not more than 221 */
- if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
- MAX_PAYLOAD_OFFSET) {
- return -EPROTONOSUPPORT;
+ if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) >
+ MAX_PAYLOAD_OFFSET)) {
+ offload = TX_OFFLOAD_INVALID;
+ return offload;
}
- /* mss should not less than 80 */
- if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
- *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
- *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
+ if (avd_flag == HINIC_TX_UFO_AVD)
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, UFO_AVD);
+
+ if (offload) {
+ hinic_task_set_tx_offload_valid(task, skb_network_offset(skb));
+ task->pkt_info0 = be32_to_cpu(task->pkt_info0);
+ task->pkt_info1 = be32_to_cpu(task->pkt_info1);
+ task->pkt_info2 = be32_to_cpu(task->pkt_info2);
}
- return 0;
+ return offload;
}
-netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static inline void __get_pkt_stats(struct hinic_tx_info *tx_info,
+ struct sk_buff *skb)
{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u16 prod_idx, q_id = skb->queue_mapping;
- struct netdev_queue *netdev_txq;
- int nr_sges, err = NETDEV_TX_OK;
- struct hinic_sq_wqe *sq_wqe;
- unsigned int wqe_size;
- struct hinic_txq *txq;
- struct hinic_qp *qp;
+ u32 ihs, hdr_len;
+
+ if (skb_is_gso(skb)) {
+#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \
+ defined(HAVE_SK_BUFF_ENCAPSULATION))
+ if (skb->encapsulation) {
+#ifdef HAVE_SKB_INNER_TRANSPORT_OFFSET
+ ihs = skb_inner_transport_offset(skb) +
+ inner_tcp_hdrlen(skb);
+#else
+ ihs = (skb_inner_transport_header(skb) - skb->data) +
+ inner_tcp_hdrlen(skb);
+#endif
+ } else {
+#endif
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \
+ defined(HAVE_SK_BUFF_ENCAPSULATION))
+ }
+#endif
+ hdr_len = (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ tx_info->num_bytes = skb->len + (u64)hdr_len;
- txq = &nic_dev->txqs[q_id];
- qp = container_of(txq->sq, struct hinic_qp, sq);
- nr_sges = skb_shinfo(skb)->nr_frags + 1;
+ } else {
+ tx_info->num_bytes = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
+ }
+
+ tx_info->num_pkts = 1;
+}
+
+inline u8 hinic_get_vlan_pri(struct sk_buff *skb)
+{
+ u16 vlan_tci = 0;
+ int err;
- err = tx_map_skb(nic_dev, skb, txq->sges);
+ err = vlan_get_tag(skb, &vlan_tci);
if (err)
- goto skb_error;
+ return 0;
- wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);
+ return (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
- sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
- if (!sq_wqe) {
- netif_stop_subqueue(netdev, qp->q_id);
+static void *__try_to_get_wqe(struct net_device *netdev, u16 q_id,
+ int wqebb_cnt, u16 *pi, u8 *owner)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ void *wqe = NULL;
+
+ netif_stop_subqueue(netdev, q_id);
+ /* We need to check again in case another CPU has just
+ * made room available.
+ */
+ if (unlikely(hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >=
+ wqebb_cnt)) {
+ netif_start_subqueue(netdev, q_id);
+ /* there are enough wqebbs after the queue is woken up */
+ wqe = hinic_get_sq_wqe(nic_dev->hwdev, q_id,
+ wqebb_cnt, pi, owner);
+ }
- sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
- if (sq_wqe) {
- netif_wake_subqueue(nic_dev->netdev, qp->q_id);
- goto process_sq_wqe;
- }
+ return wqe;
+}
- tx_unmap_skb(nic_dev, skb, txq->sges);
+#define HINIC_FRAG_STATUS_OK 0
+#define HINIC_FRAG_STATUS_IGNORE 1
- u64_stats_update_begin(&txq->txq_stats.syncp);
- txq->txq_stats.tx_busy++;
- u64_stats_update_end(&txq->txq_stats.syncp);
- err = NETDEV_TX_BUSY;
- wqe_size = 0;
- goto flush_skbs;
+static netdev_tx_t hinic_send_one_skb(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct hinic_txq *txq,
+ u8 *flag, u8 avd_flag)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_tx_info *tx_info;
+ struct hinic_sq_wqe *wqe = NULL;
+ enum tx_offload_type offload = 0;
+ u16 q_id = txq->q_id;
+ u32 queue_info = 0;
+ u8 owner = 0;
+ u16 pi = 0;
+ int err, wqebb_cnt;
+ u16 num_sge = 0;
+ u16 original_nr_frags;
+ u16 new_nr_frags;
+ u16 i;
+ int frag_err = HINIC_FRAG_STATUS_OK;
+
+ /* skb->dev will not be initialized when calling netdev_alloc_skb_ip_align
+ * and the length parameter is larger than PAGE_SIZE (under redhat7.3),
+ * but skb->dev will be used in vlan_get_tag or somewhere
+ */
+ if (unlikely(!skb->dev))
+ skb->dev = netdev;
+
+ if (unlikely(skb->len < MIN_SKB_LEN)) {
+ if (skb_pad(skb, (int)(MIN_SKB_LEN - skb->len))) {
+ TXQ_STATS_INC(txq, skb_pad_err);
+ goto tx_skb_pad_err;
+ }
+
+ skb->len = MIN_SKB_LEN;
}
-process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
- hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
+ original_nr_frags = skb_shinfo(skb)->nr_frags;
+ new_nr_frags = original_nr_frags;
+
+ /* If the sizes of the last frags are all zero, those frags should be ignored.
+ * If the size of some frag in the middle is zero, this skb should be dropped.
+ */
+ for (i = 0; i < original_nr_frags; i++) {
+ if ((skb_frag_size(&skb_shinfo(skb)->frags[i])) &&
+ frag_err == HINIC_FRAG_STATUS_OK)
+ continue;
+
+ if ((!skb_frag_size(&skb_shinfo(skb)->frags[i])) &&
+ frag_err == HINIC_FRAG_STATUS_OK) {
+ frag_err = HINIC_FRAG_STATUS_IGNORE;
+ new_nr_frags = i + 1;
+ continue;
+ }
-flush_skbs:
- netdev_txq = netdev_get_tx_queue(netdev, q_id);
- if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
- hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
+ if ((!skb_frag_size(&skb_shinfo(skb)->frags[i])) &&
+ frag_err == HINIC_FRAG_STATUS_IGNORE)
+ continue;
- return err;
+ if ((skb_frag_size(&skb_shinfo(skb)->frags[i])) &&
+ frag_err == HINIC_FRAG_STATUS_IGNORE) {
+ TXQ_STATS_INC(txq, frag_size_err);
+ goto tx_drop_pkts;
+ }
+ }
-skb_error:
- dev_kfree_skb_any(skb);
- u64_stats_update_begin(&txq->txq_stats.syncp);
- txq->txq_stats.tx_dropped++;
- u64_stats_update_end(&txq->txq_stats.syncp);
+ num_sge = new_nr_frags + 1;
- return NETDEV_TX_OK;
-}
-
-netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- u16 prod_idx, q_id = skb->queue_mapping;
- struct netdev_queue *netdev_txq;
- int nr_sges, err = NETDEV_TX_OK;
- struct hinic_sq_wqe *sq_wqe;
- unsigned int wqe_size;
- struct hinic_txq *txq;
- struct hinic_qp *qp;
+ /* if skb->len is more than 65536B but num_sge is 1,
+ * driver will drop it
+ */
+ if (unlikely(skb->len > HINIC_GSO_MAX_SIZE && num_sge == 1)) {
+ TXQ_STATS_INC(txq, frag_len_overflow);
+ goto tx_drop_pkts;
+ }
- txq = &nic_dev->txqs[q_id];
- qp = container_of(txq->sq, struct hinic_qp, sq);
+ /* if the sge number is more than 17, the driver will use only 17 sges */
+ if (unlikely(num_sge > HINIC_MAX_SQ_SGE)) {
+ TXQ_STATS_INC(txq, big_frags_pkts);
+ num_sge = HINIC_MAX_SQ_SGE;
+ }
- if (skb->len < MIN_SKB_LEN) {
- if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
- netdev_err(netdev, "Failed to pad skb\n");
- goto update_error_stats;
+ wqebb_cnt = HINIC_SQ_WQEBB_CNT(num_sge);
+ if (likely(hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >=
+ wqebb_cnt)) {
+ if (likely(wqebb_cnt == 1)) {
+ hinic_update_sq_pi(nic_dev->hwdev, q_id,
+ wqebb_cnt, &pi, &owner);
+ wqe = txq->tx_info[pi].wqe;
+ } else {
+ wqe = hinic_get_sq_wqe(nic_dev->hwdev, q_id,
+ wqebb_cnt, &pi, &owner);
}
- skb->len = MIN_SKB_LEN;
+ } else {
+ wqe = __try_to_get_wqe(netdev, q_id, wqebb_cnt, &pi, &owner);
+ if (likely(!wqe)) {
+ TXQ_STATS_INC(txq, busy);
+ return NETDEV_TX_BUSY;
+ }
}
- nr_sges = skb_shinfo(skb)->nr_frags + 1;
- if (nr_sges > 17) {
- u64_stats_update_begin(&txq->txq_stats.syncp);
- txq->txq_stats.big_frags_pkts++;
- u64_stats_update_end(&txq->txq_stats.syncp);
+ tx_info = &txq->tx_info[pi];
+ tx_info->skb = skb;
+ tx_info->wqebb_cnt = wqebb_cnt;
+ tx_info->valid_nr_frags = new_nr_frags;
+
+ __get_pkt_stats(tx_info, skb);
+
+ offload = hinic_tx_offload(txq, skb, &wqe->task, &queue_info, avd_flag);
+ if (unlikely(offload == TX_OFFLOAD_INVALID)) {
+ hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner);
+ TXQ_STATS_INC(txq, offload_cow_skb_err);
+ goto tx_drop_pkts;
}
- if (nr_sges > txq->max_sges) {
- netdev_err(netdev, "Too many Tx sges\n");
- goto skb_error;
+ err = tx_map_skb(nic_dev, skb, txq, tx_info, wqe->buf_descs,
+ new_nr_frags);
+ if (err) {
+ hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner);
+ goto tx_drop_pkts;
}
- err = tx_map_skb(nic_dev, skb, txq->sges);
- if (err)
- goto skb_error;
+ hinic_prepare_sq_ctrl(&wqe->ctrl, queue_info, num_sge, owner);
- wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);
+ hinic_send_sq_wqe(nic_dev->hwdev, q_id, wqe, wqebb_cnt,
+ nic_dev->sq_cos_mapping[hinic_get_vlan_pri(skb)]);
- sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
- if (!sq_wqe) {
- netif_stop_subqueue(netdev, qp->q_id);
+ return NETDEV_TX_OK;
- /* Check for the case free_tx_poll is called in another cpu
- * and we stopped the subqueue after free_tx_poll check.
- */
- sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
- if (sq_wqe) {
- netif_wake_subqueue(nic_dev->netdev, qp->q_id);
- goto process_sq_wqe;
- }
+tx_drop_pkts:
+ dev_kfree_skb_any(skb);
- tx_unmap_skb(nic_dev, skb, txq->sges);
+tx_skb_pad_err:
+ TXQ_STATS_INC(txq, dropped);
- u64_stats_update_begin(&txq->txq_stats.syncp);
- txq->txq_stats.tx_busy++;
- u64_stats_update_end(&txq->txq_stats.syncp);
- err = NETDEV_TX_BUSY;
- wqe_size = 0;
- goto flush_skbs;
+ *flag = HINIC_TX_DROPPED;
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 q_id = skb_get_queue_mapping(skb);
+ struct hinic_txq *txq;
+ u8 flag = 0;
+
+ if (unlikely(!nic_dev->heart_status)) {
+ dev_kfree_skb_any(skb);
+ HINIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop);
+ return NETDEV_TX_OK;
}
-process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
+ txq = &nic_dev->txqs[q_id];
- err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
- if (err)
- goto offload_error;
+ return hinic_send_one_skb(skb, netdev, txq, &flag, HINIC_TX_NON_AVD);
+}
- hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
+netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 q_id = skb_get_queue_mapping(skb);
+ struct hinic_txq *txq;
+ u8 flag = 0;
-flush_skbs:
- netdev_txq = netdev_get_tx_queue(netdev, q_id);
- if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
- hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
+ if (unlikely(!netif_carrier_ok(netdev) ||
+ !nic_dev->heart_status)) {
+ dev_kfree_skb_any(skb);
+ HINIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop);
+ return NETDEV_TX_OK;
+ }
- return err;
+ if (unlikely(q_id >= nic_dev->num_qps)) {
+ txq = &nic_dev->txqs[0];
+ HINIC_NIC_STATS_INC(nic_dev, tx_invalid_qid);
+ goto tx_drop_pkts;
+ }
+ txq = &nic_dev->txqs[q_id];
-offload_error:
- hinic_sq_return_wqe(txq->sq, wqe_size);
- tx_unmap_skb(nic_dev, skb, txq->sges);
+ return hinic_send_one_skb(skb, netdev, txq, &flag, HINIC_TX_NON_AVD);
-skb_error:
+tx_drop_pkts:
dev_kfree_skb_any(skb);
-
-update_error_stats:
u64_stats_update_begin(&txq->txq_stats.syncp);
- txq->txq_stats.tx_dropped++;
+ txq->txq_stats.dropped++;
u64_stats_update_end(&txq->txq_stats.syncp);
return NETDEV_TX_OK;
}
-/**
- * tx_free_skb - unmap and free skb
- * @nic_dev: nic device
- * @skb: the skb
- * @sges: the sges that are connected to the skb
- **/
-static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
- struct hinic_sge *sges)
+static inline void tx_free_skb(struct hinic_nic_dev *nic_dev,
+ struct sk_buff *skb,
+ struct hinic_tx_info *tx_info)
{
- tx_unmap_skb(nic_dev, skb, sges);
+ tx_unmap_skb(nic_dev, skb, tx_info->dma_len, tx_info->valid_nr_frags);
+ kfree(tx_info->cpy_buff);
+ tx_info->cpy_buff = NULL;
dev_kfree_skb_any(skb);
}
-/**
- * free_all_tx_skbs - free all skbs in tx queue
- * @txq: tx queue
- **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
- struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
- struct hinic_sq *sq = txq->sq;
- struct hinic_sq_wqe *sq_wqe;
- unsigned int wqe_size;
- struct sk_buff *skb;
- int nr_sges;
+ struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev);
+ struct hinic_tx_info *tx_info;
u16 ci;
+ int free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev,
+ txq->q_id) + 1;
- while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
- sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
- if (!sq_wqe)
- break;
+ while (free_wqebbs < txq->q_depth) {
+ ci = hinic_get_sq_local_ci(nic_dev->hwdev, txq->q_id);
- nr_sges = skb_shinfo(skb)->nr_frags + 1;
+ tx_info = &txq->tx_info[ci];
- hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
+ tx_free_skb(nic_dev, tx_info->skb, tx_info);
- hinic_sq_put_wqe(sq, wqe_size);
+ hinic_update_sq_local_ci(nic_dev->hwdev, txq->q_id,
+ tx_info->wqebb_cnt);
- tx_free_skb(nic_dev, skb, txq->free_sges);
+ free_wqebbs += tx_info->wqebb_cnt;
}
}
-/**
- * free_tx_poll - free finished tx skbs in tx queue that connected to napi
- * @napi: napi
- * @budget: number of tx
- *
- * Return 0 - Success, negative - Failure
- **/
-static int free_tx_poll(struct napi_struct *napi, int budget)
+int hinic_tx_poll(struct hinic_txq *txq, int budget)
{
- struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
- struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
- struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
- struct netdev_queue *netdev_txq;
- struct hinic_sq *sq = txq->sq;
- struct hinic_wq *wq = sq->wq;
- struct hinic_sq_wqe *sq_wqe;
- unsigned int wqe_size;
- int nr_sges, pkts = 0;
+ struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev);
struct sk_buff *skb;
- u64 tx_bytes = 0;
- u16 hw_ci, sw_ci;
+ struct hinic_tx_info *tx_info;
+ u64 tx_bytes = 0, wake = 0;
+ int pkts = 0, nr_pkts = 0, wqebb_cnt = 0;
+ u16 hw_ci, sw_ci = 0, q_id = txq->q_id;
- do {
- hw_ci = HW_CONS_IDX(sq) & wq->mask;
+ hw_ci = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id);
+ dma_rmb();
+ sw_ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id);
- dma_rmb();
+ do {
+ tx_info = &txq->tx_info[sw_ci];
- /* Reading a WQEBB to get real WQE size and consumer index. */
- sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
- if (!sq_wqe ||
- (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
+ /* Whether all of the wqebb of this wqe is completed */
+ if (hw_ci == sw_ci || ((hw_ci - sw_ci) &
+ txq->q_mask) < tx_info->wqebb_cnt) {
break;
-
- /* If this WQE have multiple WQEBBs, we will read again to get
- * full size WQE.
- */
- if (wqe_size > wq->wqebb_size) {
- sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
- if (unlikely(!sq_wqe))
- break;
}
- tx_bytes += skb->len;
- pkts++;
+ sw_ci = (u16)(sw_ci + tx_info->wqebb_cnt) & txq->q_mask;
+ prefetch(&txq->tx_info[sw_ci]);
- nr_sges = skb_shinfo(skb)->nr_frags + 1;
+ wqebb_cnt += tx_info->wqebb_cnt;
- hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
+ skb = tx_info->skb;
+ tx_bytes += tx_info->num_bytes;
+ nr_pkts += tx_info->num_pkts;
+ pkts++;
- hinic_sq_put_wqe(sq, wqe_size);
+ tx_free_skb(nic_dev, skb, tx_info);
- tx_free_skb(nic_dev, skb, txq->free_sges);
- } while (pkts < budget);
+ } while (likely(pkts < budget));
- if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
- hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
- netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);
+ hinic_update_sq_local_ci(nic_dev->hwdev, q_id, wqebb_cnt);
- __netif_tx_lock(netdev_txq, smp_processor_id());
- if (!netif_testing(nic_dev->netdev))
- netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+ if (unlikely(__netif_subqueue_stopped(nic_dev->netdev, q_id) &&
+ hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >= 1 &&
+ test_bit(HINIC_INTF_UP, &nic_dev->flags))) {
+ struct netdev_queue *netdev_txq =
+ netdev_get_tx_queue(txq->netdev, q_id);
+ __netif_tx_lock(netdev_txq, smp_processor_id());
+ /* To avoid re-waking subqueue with xmit_frame */
+ if (__netif_subqueue_stopped(nic_dev->netdev, q_id)) {
+ netif_wake_subqueue(nic_dev->netdev, q_id);
+ wake++;
+ }
__netif_tx_unlock(netdev_txq);
-
- u64_stats_update_begin(&txq->txq_stats.syncp);
- txq->txq_stats.tx_wake++;
- u64_stats_update_end(&txq->txq_stats.syncp);
}
u64_stats_update_begin(&txq->txq_stats.syncp);
txq->txq_stats.bytes += tx_bytes;
- txq->txq_stats.pkts += pkts;
+ txq->txq_stats.packets += nr_pkts;
+ txq->txq_stats.wake += wake;
u64_stats_update_end(&txq->txq_stats.syncp);
- if (pkts < budget) {
- napi_complete(napi);
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- sq->msix_entry,
- HINIC_MSIX_ENABLE);
-
- return pkts;
- }
-
- return budget;
+ return pkts;
}
-static irqreturn_t tx_irq(int irq, void *data)
+int hinic_setup_tx_wqe(struct hinic_txq *txq)
{
- struct hinic_txq *txq = data;
- struct hinic_dev *nic_dev;
+ struct net_device *netdev = txq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sq_wqe *wqe;
+ struct hinic_tx_info *tx_info;
+ u16 pi = 0;
+ int i;
+ u8 owner = 0;
+
+ for (i = 0; i < txq->q_depth; i++) {
+ tx_info = &txq->tx_info[i];
- nic_dev = netdev_priv(txq->netdev);
+ wqe = hinic_get_sq_wqe(nic_dev->hwdev, txq->q_id,
+ 1, &pi, &owner);
+ if (!wqe) {
+ nicif_err(nic_dev, drv, netdev, "Failed to get SQ wqe\n");
+ break;
+ }
- if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
- /* Disable the interrupt until napi will be completed */
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- txq->sq->msix_entry,
- HINIC_MSIX_DISABLE);
+ tx_info->wqe = wqe;
+ }
- hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
+ hinic_return_sq_wqe(nic_dev->hwdev, txq->q_id, txq->q_depth, owner);
- napi_schedule(&txq->napi);
- return IRQ_HANDLED;
+ return i;
}
-static int tx_request_irq(struct hinic_txq *txq)
+int hinic_setup_all_tx_resources(struct net_device *netdev)
{
- struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
- struct hinic_msix_config interrupt_info = {0};
- struct hinic_intr_coal_info *intr_coal = NULL;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_sq *sq = txq->sq;
- struct hinic_qp *qp;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_txq *txq;
+ u64 tx_info_sz;
+ u16 i, q_id;
int err;
- qp = container_of(sq, struct hinic_qp, sq);
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ tx_info_sz = txq->q_depth * sizeof(*txq->tx_info);
+ if (!tx_info_sz) {
+ nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size txq%d info\n",
+ q_id);
+ err = -EINVAL;
+ goto init_txq_err;
+ }
- netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll,
- nic_dev->tx_weight);
+ txq->tx_info = kzalloc(tx_info_sz, GFP_KERNEL);
+ if (!txq->tx_info) {
+ err = -ENOMEM;
+ goto init_txq_err;
+ }
- hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
- TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
- TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
- TX_IRQ_NO_RESEND_TIMER);
+ err = hinic_setup_tx_wqe(txq);
+ if (err != txq->q_depth) {
+ nicif_err(nic_dev, drv, netdev, "Failed to setup Tx: %d wqe\n",
+ q_id);
+ q_id++;
+ goto init_txq_err;
+ }
+ }
- intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
- interrupt_info.msix_index = sq->msix_entry;
- interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
- interrupt_info.pending_cnt = intr_coal->pending_limt;
- interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
+ return 0;
- err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
- if (err) {
- netif_err(nic_dev, drv, txq->netdev,
- "Failed to set TX interrupt coalescing attribute\n");
- netif_napi_del(&txq->napi);
- return err;
+init_txq_err:
+ for (i = 0; i < q_id; i++) {
+ txq = &nic_dev->txqs[i];
+ kfree(txq->tx_info);
}
- err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
- if (err) {
- dev_err(&pdev->dev, "Failed to request Tx irq\n");
- netif_napi_del(&txq->napi);
- return err;
- }
+ return err;
+}
- return 0;
+void hinic_free_all_tx_resources(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_txq *txq;
+ u16 q_id;
+
+ for (q_id = 0; q_id < nic_dev->num_qps; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ free_all_tx_skbs(txq);
+ kfree(txq->tx_info);
+ }
}
-static void tx_free_irq(struct hinic_txq *txq)
+void hinic_set_sq_default_cos(struct net_device *netdev, u8 cos_id)
{
- struct hinic_sq *sq = txq->sq;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ int up;
- free_irq(sq->irq, txq);
- netif_napi_del(&txq->napi);
+ for (up = HINIC_DCB_UP_MAX - 1; up >= 0; up--)
+ nic_dev->sq_cos_mapping[up] = nic_dev->default_cos_id;
}
-/**
- * hinic_init_txq - Initialize the Tx Queue
- * @txq: Logical Tx Queue
- * @sq: Hardware Tx Queue to connect the Logical queue with
- * @netdev: network device to connect the Logical queue with
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
- struct net_device *netdev)
+int hinic_sq_cos_mapping(struct net_device *netdev)
{
- struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- int err, irqname_len;
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_dcb_state dcb_state = {0};
+ u8 default_cos = 0;
+ int err;
- txq->netdev = netdev;
- txq->sq = sq;
+ if (HINIC_FUNC_IS_VF(nic_dev->hwdev)) {
+ err = hinic_get_pf_dcb_state(nic_dev->hwdev, &dcb_state);
+ if (err) {
+ hinic_info(nic_dev, drv, "Failed to get vf default cos\n");
+ return err;
+ }
- txq_stats_init(txq);
+ default_cos = dcb_state.default_cos;
+ nic_dev->default_cos_id = default_cos;
+ hinic_set_sq_default_cos(nic_dev->netdev, default_cos);
+ } else {
+ default_cos = nic_dev->default_cos_id;
+ if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))
+ memcpy(nic_dev->sq_cos_mapping, nic_dev->up_cos,
+ sizeof(nic_dev->sq_cos_mapping));
+ else
+ hinic_set_sq_default_cos(nic_dev->netdev, default_cos);
+
+ dcb_state.dcb_on = !!test_bit(HINIC_DCB_ENABLE,
+ &nic_dev->flags);
+ dcb_state.default_cos = default_cos;
+ memcpy(dcb_state.up_cos, nic_dev->sq_cos_mapping,
+ sizeof(dcb_state.up_cos));
+
+ err = hinic_set_dcb_state(nic_dev->hwdev, &dcb_state);
+ if (err)
+ hinic_info(nic_dev, drv, "Failed to set vf default cos\n");
+ }
- txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
+ return err;
+}
- txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
- sizeof(*txq->sges), GFP_KERNEL);
- if (!txq->sges)
- return -ENOMEM;
+int hinic_alloc_txqs(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic_txq *txq;
+ u16 q_id, num_txqs = nic_dev->max_qps;
+ u64 txq_size;
- txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
- sizeof(*txq->free_sges), GFP_KERNEL);
- if (!txq->free_sges) {
- err = -ENOMEM;
- goto err_alloc_free_sges;
+ txq_size = num_txqs * sizeof(*nic_dev->txqs);
+ if (!txq_size) {
+ nic_err(&pdev->dev, "Cannot allocate zero size txqs\n");
+ return -EINVAL;
}
- irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
- txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
- if (!txq->irq_name) {
- err = -ENOMEM;
- goto err_alloc_irqname;
- }
+ nic_dev->txqs = kzalloc(txq_size, GFP_KERNEL);
- sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);
+ if (!nic_dev->txqs) {
+ nic_err(&pdev->dev, "Failed to allocate txqs\n");
+ return -ENOMEM;
+ }
- err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
- CI_UPDATE_NO_COALESC);
- if (err)
- goto err_hw_ci;
+ for (q_id = 0; q_id < num_txqs; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ txq->netdev = netdev;
+ txq->q_id = q_id;
+ txq->q_depth = nic_dev->sq_depth;
+ txq->q_mask = nic_dev->sq_depth - 1;
- err = tx_request_irq(txq);
- if (err) {
- netdev_err(netdev, "Failed to request Tx irq\n");
- goto err_req_tx_irq;
+ txq_stats_init(txq);
}
return 0;
+}
-err_req_tx_irq:
-err_hw_ci:
- devm_kfree(&netdev->dev, txq->irq_name);
+void hinic_free_txqs(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
-err_alloc_irqname:
- devm_kfree(&netdev->dev, txq->free_sges);
+ kfree(nic_dev->txqs);
+}
-err_alloc_free_sges:
- devm_kfree(&netdev->dev, txq->sges);
- return err;
+/* should stop transmitting any packets before calling this function */
+#define HINIC_FLUSH_QUEUE_TIMEOUT 1000
+
+static bool hinic_get_hw_handle_status(void *hwdev, u16 q_id)
+{
+ u16 sw_pi = 0, hw_ci = 0;
+
+ sw_pi = hinic_dbg_get_sq_pi(hwdev, q_id);
+ hw_ci = hinic_get_sq_hw_ci(hwdev, q_id);
+
+ return sw_pi == hw_ci;
}
-/**
- * hinic_clean_txq - Clean the Tx Queue
- * @txq: Logical Tx Queue
- **/
-void hinic_clean_txq(struct hinic_txq *txq)
+int hinic_stop_sq(struct hinic_txq *txq)
{
- struct net_device *netdev = txq->netdev;
+ struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev);
+ unsigned long timeout;
+ int err;
+
+ timeout = msecs_to_jiffies(HINIC_FLUSH_QUEUE_TIMEOUT) + jiffies;
+ do {
+ if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id))
+ return 0;
+
+ usleep_range(900, 1000);
+ } while (time_before(jiffies, timeout));
+
+ /* force hardware to drop packets */
+ timeout = msecs_to_jiffies(HINIC_FLUSH_QUEUE_TIMEOUT) + jiffies;
+ do {
+ if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id))
+ return 0;
+
+ err = hinic_force_drop_tx_pkt(nic_dev->hwdev);
+ if (err)
+ break;
- tx_free_irq(txq);
+ usleep_range(9900, 10000);
+ } while (time_before(jiffies, timeout));
- free_all_tx_skbs(txq);
+ /* Avoid msleep taking too long and getting a fake result */
+ if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id))
+ return 0;
- devm_kfree(&netdev->dev, txq->irq_name);
- devm_kfree(&netdev->dev, txq->free_sges);
- devm_kfree(&netdev->dev, txq->sges);
+ return -EFAULT;
}
+
+void hinic_flush_txqs(struct net_device *netdev)
+{
+ struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 qid;
+ int err;
+
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ err = hinic_stop_sq(&nic_dev->txqs[qid]);
+ if (err)
+ nicif_err(nic_dev, drv, netdev,
+ "Failed to stop sq%d\n", qid);
+ }
+} /*lint -e766*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
index 91dc778362f3..3d38b4475f99 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
@@ -1,54 +1,123 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Huawei HiNIC PCI Express Linux driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
*/
#ifndef HINIC_TX_H
#define HINIC_TX_H
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/u64_stats_sync.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_qp.h"
+/* per-skb TX offload kinds requested of hardware on the xmit path */
+enum tx_offload_type {
+ TX_OFFLOAD_TSO = BIT(0),
+ TX_OFFLOAD_CSUM = BIT(1),
+ TX_OFFLOAD_VLAN = BIT(2),
+ TX_OFFLOAD_INVALID = BIT(3),
+};
struct hinic_txq_stats {
- u64 pkts;
- u64 bytes;
- u64 tx_busy;
- u64 tx_wake;
- u64 tx_dropped;
+ u64 packets;
+ u64 bytes;
+ u64 busy;
+ u64 wake;
+ u64 dropped;
 u64 big_frags_pkts;
+ u64 big_udp_pkts;
+
+ /* fine-grained drop/error counters, exposed only via the private
+ * debug tool
+ */
+ u64 ufo_pkt_unsupport;
+ u64 ufo_linearize_err;
+ u64 ufo_alloc_skb_err;
+ u64 skb_pad_err;
+ u64 frag_len_overflow;
+ u64 offload_cow_skb_err;
+ u64 alloc_cpy_frag_err;
+ u64 map_cpy_frag_err;
+ u64 map_frag_err;
+ u64 frag_size_err;
+ u64 unknown_tunnel_pkt;
+
+ struct u64_stats_sync syncp;
+};
+
+/* DMA mapping of one scatter element of a TX WQE */
+struct hinic_dma_len {
+ dma_addr_t dma;
+ u32 len;
+};
+
+#define MAX_SGE_NUM_PER_WQE 17
+
+/* per-WQE bookkeeping needed to complete and unmap a transmitted skb */
+struct hinic_tx_info {
+ struct sk_buff *skb;
+
+ int wqebb_cnt;
- struct u64_stats_sync syncp;
+ int num_sge;
+ void *wqe;
+ u8 *cpy_buff;
+ u16 valid_nr_frags;
+ u16 num_pkts;
+ u64 num_bytes;
+ struct hinic_dma_len dma_len[MAX_SGE_NUM_PER_WQE];
 };
struct hinic_txq {
- struct net_device *netdev;
- struct hinic_sq *sq;
+ struct net_device *netdev;
- struct hinic_txq_stats txq_stats;
+ u16 q_id;
+ u16 q_depth;
+ /* q_depth - 1, for cheap index wrapping */
+ u16 q_mask;
+ struct hinic_txq_stats txq_stats;
+ u64 last_moder_packets;
+ u64 last_moder_bytes;
+ /* per-WQE completion bookkeeping */
+ struct hinic_tx_info *tx_info;
+};
- int max_sges;
- struct hinic_sge *sges;
- struct hinic_sge *free_sges;
+/* return codes of the internal xmit path */
+enum hinic_tx_xmit_status {
+ HINIC_TX_OK = 0,
+ HINIC_TX_DROPPED = 1,
+ HINIC_TX_BUSY = 2,
+};
- char *irq_name;
- struct napi_struct napi;
+/* NOTE(review): "avd" presumably means "avoid(ance)" for UFO handling
+ * - confirm against the xmit implementation
+ */
+enum hinic_tx_avd_type {
+ HINIC_TX_NON_AVD = 0,
+ HINIC_TX_UFO_AVD = 1,
 };
-void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
+void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats);
+
+void hinic_txq_get_stats(struct hinic_txq *txq,
+ struct hinic_txq_stats *stats);
-netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev);
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
- struct net_device *netdev);
+int hinic_setup_all_tx_resources(struct net_device *netdev);
+
+void hinic_free_all_tx_resources(struct net_device *netdev);
+
+void hinic_set_sq_default_cos(struct net_device *netdev, u8 cos_id);
+
+int hinic_sq_cos_mapping(struct net_device *netdev);
+
+int hinic_alloc_txqs(struct net_device *netdev);
+
+void hinic_free_txqs(struct net_device *netdev);
+
+int hinic_tx_poll(struct hinic_txq *txq, int budget);
+
+u8 hinic_get_vlan_pri(struct sk_buff *skb);
-void hinic_clean_txq(struct hinic_txq *txq);
+void hinic_flush_txqs(struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_wq.c
new file mode 100644
index 000000000000..27a81b78b026
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_wq.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "ossl_knl.h"
+#include "hinic_hw.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hwif.h"
+#include "hinic_wq.h"
+#include "hinic_qe_def.h"
+
+#define WQS_MAX_NUM_BLOCKS 256
+#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \
+ sizeof((wqs)->free_blocks[0]))
+
+static int wqs_next_block(struct hinic_wqs *wqs, u32 *page_idx,
+ u32 *block_idx);
+
+static void wqs_return_block(struct hinic_wqs *wqs, u32 page_idx,
+ u32 block_idx);
+
+/* Allocate one DMA-coherent queue page plus a matching software shadow.
+ * @handle: device handle used for DMA allocation and logging
+ * @vaddr/@paddr: out - virtual/physical address of the DMA page
+ * @shadow_vaddr: out - vzalloc'd shadow page (initialization-time only)
+ * Return: 0 on success, -ENOMEM on any failure.
+ */
+static int queue_alloc_page(void *handle, u64 **vaddr, u64 *paddr,
+ u64 **shadow_vaddr, u64 page_sz)
+{
+ dma_addr_t dma_addr = 0;
+
+ *vaddr = dma_alloc_coherent(handle, page_sz, &dma_addr, GFP_KERNEL);
+ if (!*vaddr) {
+ sdk_err(handle, "Failed to allocate dma to wqs page\n");
+ return -ENOMEM;
+ }
+
+ /* hardware requires the CLA page to be 4K aligned */
+ if (!ADDR_4K_ALIGNED(dma_addr)) {
+ sdk_err(handle, "Cla is not 4k aligned\n");
+ goto shadow_vaddr_err;
+ }
+
+ *paddr = (u64)dma_addr;
+
+ /* use vzalloc for big mem, shadow_vaddr only used at initialization */
+ *shadow_vaddr = vzalloc(page_sz);
+ if (!*shadow_vaddr) {
+ sdk_err(handle, "Failed to allocate shadow page vaddr\n");
+ goto shadow_vaddr_err;
+ }
+
+ return 0;
+
+shadow_vaddr_err:
+ dma_free_coherent(handle, page_sz, *vaddr, dma_addr);
+ return -ENOMEM;
+}
+
+/* allocate one WQS page (DMA page + shadow) at @page_idx */
+static int wqs_allocate_page(struct hinic_wqs *wqs, u32 page_idx)
+{
+ return queue_alloc_page(wqs->dev_hdl, &wqs->page_vaddr[page_idx],
+ &wqs->page_paddr[page_idx],
+ &wqs->shadow_page_vaddr[page_idx],
+ WQS_PAGE_SIZE);
+}
+
+/* free the DMA page and shadow allocated by wqs_allocate_page() */
+static void wqs_free_page(struct hinic_wqs *wqs, u32 page_idx)
+{
+ dma_free_coherent(wqs->dev_hdl, WQS_PAGE_SIZE,
+ wqs->page_vaddr[page_idx],
+ (dma_addr_t)wqs->page_paddr[page_idx]);
+ vfree(wqs->shadow_page_vaddr[page_idx]);
+}
+
+/* allocate the single page shared by all command queues */
+static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
+{
+ return queue_alloc_page(cmdq_pages->dev_hdl,
+ &cmdq_pages->cmdq_page_vaddr,
+ &cmdq_pages->cmdq_page_paddr,
+ &cmdq_pages->cmdq_shadow_page_vaddr,
+ CMDQ_PAGE_SIZE);
+}
+
+/* free the page allocated by cmdq_allocate_page() */
+static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
+{
+ dma_free_coherent(cmdq_pages->dev_hdl, CMDQ_PAGE_SIZE,
+ cmdq_pages->cmdq_page_vaddr,
+ (dma_addr_t)cmdq_pages->cmdq_page_paddr);
+ vfree(cmdq_pages->cmdq_shadow_page_vaddr);
+}
+
+/* Allocate the shadow WQE buffer and shadow index array used when a
+ * single WQE may span a page boundary (max_wqe_size > wqebb_size).
+ * Return: 0 on success or when no shadow is needed, -ENOMEM on failure.
+ */
+static int alloc_wqes_shadow(struct hinic_wq *wq)
+{
+ u64 size;
+
+ /* no shadow needed when one WQE never exceeds a single WQEBB */
+ if (wq->max_wqe_size <= wq->wqebb_size)
+ return 0;
+
+ size = (u64)wq->num_q_pages * wq->max_wqe_size;
+ wq->shadow_wqe = kzalloc(size, GFP_KERNEL);
+ if (!wq->shadow_wqe) {
+ pr_err("Failed to allocate shadow wqe\n");
+ return -ENOMEM;
+ }
+
+ /* NOTE(review): shadow_idx entries are u16 but sized by
+ * sizeof(wq->prod_idx) (int) - over-allocates, harmless
+ */
+ size = wq->num_q_pages * sizeof(wq->prod_idx);
+ wq->shadow_idx = kzalloc(size, GFP_KERNEL);
+ if (!wq->shadow_idx) {
+ pr_err("Failed to allocate shadow index\n");
+ goto shadow_idx_err;
+ }
+
+ return 0;
+
+shadow_idx_err:
+ kfree(wq->shadow_wqe);
+ return -ENOMEM;
+}
+
+/* free the buffers allocated by alloc_wqes_shadow(), if any */
+static void free_wqes_shadow(struct hinic_wq *wq)
+{
+ if (wq->max_wqe_size <= wq->wqebb_size)
+ return;
+
+ kfree(wq->shadow_idx);
+ kfree(wq->shadow_wqe);
+}
+
+/* Free the first @num_q_pages data pages of @wq plus its shadow
+ * buffers; also used to unwind a partial alloc_wq_pages().
+ */
+static void free_wq_pages(void *handle, struct hinic_wq *wq,
+ u32 num_q_pages)
+{
+ u32 i;
+
+ for (i = 0; i < num_q_pages; i++)
+ hinic_dma_free_coherent_align(handle, &wq->mem_align[i]);
+
+ free_wqes_shadow(wq);
+
+ wq->block_vaddr = NULL;
+ wq->shadow_block_vaddr = NULL;
+
+ kfree(wq->mem_align);
+}
+
+/* alloc_wq_pages - allocate and map the data pages of one work queue
+ * Page physical addresses are written big-endian into the block table
+ * for hardware; virtual addresses go to the shadow block table.
+ * Return: 0 on success, negative errno (partial pages are freed).
+ */
+static int alloc_wq_pages(void *dev_hdl, struct hinic_wq *wq)
+{
+ struct hinic_dma_addr_align *mem_align;
+ u64 *vaddr, *paddr;
+ u32 i, num_q_pages;
+ int err;
+
+ vaddr = wq->shadow_block_vaddr;
+ paddr = wq->block_vaddr;
+
+ num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
+ if (num_q_pages > WQ_MAX_PAGES) {
+ sdk_err(dev_hdl, "Number(%d) wq pages exceeds the limit\n",
+ num_q_pages);
+ return -EINVAL;
+ }
+
+ if (num_q_pages & (num_q_pages - 1)) {
+ sdk_err(dev_hdl, "Wq num(%d) q pages must be power of 2\n",
+ num_q_pages);
+ return -EINVAL;
+ }
+
+ wq->num_q_pages = num_q_pages;
+
+ err = alloc_wqes_shadow(wq);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to allocate wqe shadow\n");
+ return err;
+ }
+
+ wq->mem_align = kcalloc(wq->num_q_pages, sizeof(*wq->mem_align),
+ GFP_KERNEL);
+ if (!wq->mem_align) {
+ free_wqes_shadow(wq);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_q_pages; i++) {
+ mem_align = &wq->mem_align[i];
+ err = hinic_dma_alloc_coherent_align(dev_hdl, wq->wq_page_size,
+ wq->wq_page_size,
+ GFP_KERNEL, mem_align);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to allocate wq page\n");
+ goto alloc_wq_pages_err;
+ }
+
+ /* hardware reads the block table as big-endian */
+ *paddr = cpu_to_be64(mem_align->align_paddr);
+ *vaddr = (u64)mem_align->align_vaddr;
+
+ paddr++;
+ vaddr++;
+ }
+
+ return 0;
+
+alloc_wq_pages_err:
+ free_wq_pages(dev_hdl, wq, i);
+
+ return -ENOMEM;
+}
+
+/* hinic_wq_allocate - carve one work queue out of the WQS block pool
+ * @wqs: work-queue set providing the page/block resources
+ * @wq: work queue to initialize
+ * @wqebb_size: size of one WQE building block (must be non-zero)
+ * @wq_page_size: queue page size (power of 2)
+ * @q_depth: number of WQEBBs (power of 2)
+ * @max_wqe_size: largest WQE; > wqebb_size enables the shadow area
+ * Return: 0 on success, negative errno on failure.
+ */
+int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
+ u32 wqebb_size, u32 wq_page_size, u16 q_depth,
+ u32 max_wqe_size)
+{
+ u32 num_wqebbs_per_page;
+ int err;
+
+ if (wqebb_size == 0) {
+ sdk_err(wqs->dev_hdl, "Wqebb_size must be >0\n");
+ return -EINVAL;
+ }
+
+ if (q_depth & (q_depth - 1)) {
+ sdk_err(wqs->dev_hdl, "Wq q_depth(%d) isn't power of 2\n",
+ q_depth);
+ return -EINVAL;
+ }
+
+ if (wq_page_size & (wq_page_size - 1)) {
+ sdk_err(wqs->dev_hdl, "Wq page_size(%d) isn't power of 2\n",
+ wq_page_size);
+ return -EINVAL;
+ }
+
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+
+ if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ sdk_err(wqs->dev_hdl, "Num(%d) wqebbs per page isn't power of 2\n",
+ num_wqebbs_per_page);
+ return -EINVAL;
+ }
+
+ err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
+ if (err) {
+ sdk_err(wqs->dev_hdl, "Failed to get free wqs next block\n");
+ return err;
+ }
+
+ wq->wqebb_size = wqebb_size;
+ wq->wq_page_size = wq_page_size;
+ wq->q_depth = q_depth;
+ wq->max_wqe_size = max_wqe_size;
+ wq->num_wqebbs_per_page = num_wqebbs_per_page;
+
+ /* power-of-2 precondition lets mul/div become shifts */
+ wq->wqebbs_per_page_shift = (u32)ilog2(num_wqebbs_per_page);
+
+ wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
+ wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
+ wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
+
+ err = alloc_wq_pages(wqs->dev_hdl, wq);
+ if (err) {
+ sdk_err(wqs->dev_hdl, "Failed to allocate wq pages\n");
+ goto alloc_wq_pages_err;
+ }
+
+ atomic_set(&wq->delta, q_depth);
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+ wq->mask = q_depth - 1;
+
+ return 0;
+
+alloc_wq_pages_err:
+ wqs_return_block(wqs, wq->page_idx, wq->block_idx);
+ return err;
+}
+
+/* release @wq's pages and return its (page, block) to the free ring */
+void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
+{
+ free_wq_pages(wqs->dev_hdl, wq, wq->num_q_pages);
+
+ wqs_return_block(wqs, wq->page_idx, wq->block_idx);
+}
+
+/* Pop a free (page, block) pair from the ring.
+ * Consumed slots are poisoned with 0xFFFFFFFF to catch stale reuse.
+ * Return: 0 on success, -ENOMEM when the pool is exhausted.
+ */
+static int wqs_next_block(struct hinic_wqs *wqs, u32 *page_idx,
+ u32 *block_idx)
+{
+ u32 pos;
+
+ spin_lock(&wqs->alloc_blocks_lock);
+
+ if (wqs->num_free_blks <= 0) {
+ spin_unlock(&wqs->alloc_blocks_lock);
+ return -ENOMEM;
+ }
+ wqs->num_free_blks--;
+
+ pos = wqs->alloc_blk_pos++;
+ pos &= WQS_MAX_NUM_BLOCKS - 1;
+
+ *page_idx = wqs->free_blocks[pos].page_idx;
+ *block_idx = wqs->free_blocks[pos].block_idx;
+
+ wqs->free_blocks[pos].page_idx = 0xFFFFFFFF;
+ wqs->free_blocks[pos].block_idx = 0xFFFFFFFF;
+
+ spin_unlock(&wqs->alloc_blocks_lock);
+
+ return 0;
+}
+
+/* push a (page, block) pair back onto the free ring */
+static void wqs_return_block(struct hinic_wqs *wqs, u32 page_idx,
+ u32 block_idx)
+{
+ u32 pos;
+
+ spin_lock(&wqs->alloc_blocks_lock);
+
+ wqs->num_free_blks++;
+
+ pos = wqs->return_blk_pos++;
+ pos &= WQS_MAX_NUM_BLOCKS - 1;
+
+ wqs->free_blocks[pos].page_idx = page_idx;
+ wqs->free_blocks[pos].block_idx = block_idx;
+
+ spin_unlock(&wqs->alloc_blocks_lock);
+}
+
+/* Populate the free-block ring with every (page, block) pair.
+ * NOTE(review): num_free_blks is hard-set to WQS_MAX_NUM_BLOCKS;
+ * assumes num_pages * WQS_BLOCKS_PER_PAGE == WQS_MAX_NUM_BLOCKS -
+ * confirm against WQ_NUM_PAGES().
+ */
+static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
+{
+ u32 page_idx, blk_idx, pos = 0;
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
+ for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
+ wqs->free_blocks[pos].page_idx = page_idx;
+ wqs->free_blocks[pos].block_idx = blk_idx;
+ pos++;
+ }
+ }
+
+ wqs->alloc_blk_pos = 0;
+ wqs->return_blk_pos = 0;
+ wqs->num_free_blks = WQS_MAX_NUM_BLOCKS;
+ spin_lock_init(&wqs->alloc_blocks_lock);
+}
+
+/* reset producer/consumer state and zero every data page of @wq */
+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq)
+{
+ u64 *block_vaddr;
+ u32 pg_idx;
+
+ block_vaddr = wq->shadow_block_vaddr;
+
+ atomic_set(&wq->delta, wq->q_depth);
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+
+ /* shadow block table holds the CPU virtual address of each page */
+ for (pg_idx = 0; pg_idx < wq->num_q_pages; pg_idx++)
+ memset((void *)(*(block_vaddr + pg_idx)), 0, wq->wq_page_size);
+}
+
+/* hinic_cmdq_alloc - allocate @cmdq_blocks command queues in one page
+ * All cmdq WQs live in page 0 of the shared cmdq page; the block index
+ * equals the cmdq index.
+ * Return: 0 on success, negative errno (partial allocations unwound).
+ */
+int hinic_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, void *dev_hdl,
+ int cmdq_blocks, u32 wq_page_size, u32 wqebb_size,
+ u16 q_depth, u32 max_wqe_size)
+{
+ int i, j, err = -ENOMEM;
+
+ if (q_depth & (q_depth - 1)) {
+ sdk_err(dev_hdl, "Cmdq q_depth(%d) isn't power of 2\n",
+ q_depth);
+ return -EINVAL;
+ }
+
+ cmdq_pages->dev_hdl = dev_hdl;
+
+ err = cmdq_allocate_page(cmdq_pages);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to allocate CMDQ page\n");
+ return err;
+ }
+
+ for (i = 0; i < cmdq_blocks; i++) {
+ wq[i].page_idx = 0;
+ wq[i].block_idx = (u32)i;
+ wq[i].wqebb_size = wqebb_size;
+ wq[i].wq_page_size = wq_page_size;
+ wq[i].q_depth = q_depth;
+ wq[i].max_wqe_size = max_wqe_size;
+ wq[i].num_wqebbs_per_page =
+ ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+
+ wq[i].wqebbs_per_page_shift =
+ (u32)ilog2(wq[i].num_wqebbs_per_page);
+
+ wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
+ wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
+ wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
+
+ err = alloc_wq_pages(cmdq_pages->dev_hdl, &wq[i]);
+ if (err) {
+ sdk_err(dev_hdl, "Failed to alloc CMDQ blocks\n");
+ goto cmdq_block_err;
+ }
+
+ atomic_set(&wq[i].delta, q_depth);
+ wq[i].cons_idx = 0;
+ wq[i].prod_idx = 0;
+ wq[i].mask = q_depth - 1;
+ }
+
+ return 0;
+
+cmdq_block_err:
+ /* unwind the cmdqs already set up, then the shared page */
+ for (j = 0; j < i; j++)
+ free_wq_pages(cmdq_pages->dev_hdl, &wq[j], wq[j].num_q_pages);
+
+ cmdq_free_page(cmdq_pages);
+ return err;
+}
+
+/* free every cmdq WQ's pages and then the shared cmdq page */
+void hinic_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, int cmdq_blocks)
+{
+ int i;
+
+ for (i = 0; i < cmdq_blocks; i++)
+ free_wq_pages(cmdq_pages->dev_hdl, &wq[i], wq[i].num_q_pages);
+
+ cmdq_free_page(cmdq_pages);
+}
+
+/* allocate the paddr/vaddr/shadow bookkeeping arrays for WQS pages */
+static int alloc_page_addr(struct hinic_wqs *wqs)
+{
+ u64 size = wqs->num_pages * sizeof(*wqs->page_paddr);
+
+ wqs->page_paddr = kzalloc(size, GFP_KERNEL);
+ if (!wqs->page_paddr)
+ return -ENOMEM;
+
+ size = wqs->num_pages * sizeof(*wqs->page_vaddr);
+ wqs->page_vaddr = kzalloc(size, GFP_KERNEL);
+ if (!wqs->page_vaddr)
+ goto page_vaddr_err;
+
+ size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
+ wqs->shadow_page_vaddr = kzalloc(size, GFP_KERNEL);
+ if (!wqs->shadow_page_vaddr)
+ goto page_shadow_vaddr_err;
+
+ return 0;
+
+page_shadow_vaddr_err:
+ kfree(wqs->page_vaddr);
+
+page_vaddr_err:
+ kfree(wqs->page_paddr);
+ return -ENOMEM;
+}
+
+/* free the arrays allocated by alloc_page_addr() */
+static void free_page_addr(struct hinic_wqs *wqs)
+{
+ kfree(wqs->shadow_page_vaddr);
+ kfree(wqs->page_vaddr);
+ kfree(wqs->page_paddr);
+}
+
+/* hinic_wqs_alloc - allocate the page pool backing @num_wqs work queues
+ * Return: 0 on success, negative errno on failure.
+ */
+int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, void *dev_hdl)
+{
+ u32 i, page_idx;
+ int err;
+
+ wqs->dev_hdl = dev_hdl;
+ wqs->num_pages = WQ_NUM_PAGES(num_wqs);
+
+ if (alloc_page_addr(wqs)) {
+ sdk_err(dev_hdl, "Failed to allocate mem for page addresses\n");
+ return -ENOMEM;
+ }
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
+ err = wqs_allocate_page(wqs, page_idx);
+ if (err) {
+ sdk_err(dev_hdl, "Failed wq page allocation\n");
+ goto wq_allocate_page_err;
+ }
+ }
+
+ wqs->free_blocks = kzalloc(WQS_FREE_BLOCKS_SIZE(wqs), GFP_KERNEL);
+ if (!wqs->free_blocks) {
+ err = -ENOMEM;
+ goto alloc_blocks_err;
+ }
+
+ init_wqs_blocks_arr(wqs);
+ return 0;
+
+/* shared unwind: via alloc_blocks_err, page_idx == num_pages so every
+ * page is freed; via wq_allocate_page_err only the completed ones are
+ */
+alloc_blocks_err:
+wq_allocate_page_err:
+ for (i = 0; i < page_idx; i++)
+ wqs_free_page(wqs, i);
+
+ free_page_addr(wqs);
+ return err;
+}
+
+/* free all WQS pages, the bookkeeping arrays and the free-block ring */
+void hinic_wqs_free(struct hinic_wqs *wqs)
+{
+ u32 page_idx;
+
+ for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
+ wqs_free_page(wqs, page_idx);
+
+ free_page_addr(wqs);
+ kfree(wqs->free_blocks);
+}
+
+/* gather @num_wqebbs WQEBBs starting at @prod_idx into @shadow_addr.
+ * NOTE(review): idx is already masked, so the inner MASKED_WQE_IDX in
+ * the WQE_PAGE_OFF argument is redundant - harmless
+ */
+static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
+ int num_wqebbs, u16 prod_idx)
+{
+ u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr;
+ u32 i, offset;
+ u16 idx;
+
+ for (i = 0; i < (u32)num_wqebbs; i++) {
+ offset = i * wq->wqebb_size;
+ shadow_wqebb_addr = (u8 *)shadow_addr + offset;
+
+ idx = MASKED_WQE_IDX(wq, prod_idx + i);
+ wqe_page_addr = WQ_PAGE_ADDR(wq, idx);
+ wqebb_addr = wqe_page_addr +
+ WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx));
+
+ memcpy(shadow_wqebb_addr, wqebb_addr, wq->wqebb_size);
+ }
+}
+
+/* scatter @num_wqebbs WQEBBs from @shadow_addr back to the queue pages */
+static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
+ int num_wqebbs, u16 prod_idx)
+{
+ u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr;
+ u32 i, offset;
+ u16 idx;
+
+ for (i = 0; i < (u32)num_wqebbs; i++) {
+ offset = i * wq->wqebb_size;
+ shadow_wqebb_addr = (u8 *)shadow_addr + offset;
+
+ idx = MASKED_WQE_IDX(wq, prod_idx + i);
+ wqe_page_addr = WQ_PAGE_ADDR(wq, idx);
+ wqebb_addr = wqe_page_addr +
+ WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx));
+
+ memcpy(wqebb_addr, shadow_wqebb_addr, wq->wqebb_size);
+ }
+}
+
+/* return the virtual address of WQEBB @index (index must be masked) */
+void *hinic_get_wqebb_addr(struct hinic_wq *wq, u16 index)
+{
+ return WQ_PAGE_ADDR(wq, index) + WQE_PAGE_OFF(wq, index);
+}
+
+/* DMA address of the first queue page, converted from the big-endian
+ * block table entry written by alloc_wq_pages()
+ */
+u64 hinic_get_first_wqe_page_addr(struct hinic_wq *wq)
+{
+ return be64_to_cpu(*wq->block_vaddr);
+}
+
+/* hinic_get_wqe - reserve @num_wqebbs WQEBBs and return the WQE address
+ * @prod_idx: out - masked producer index of the first reserved WQEBB
+ *
+ * Returns a pointer into the queue page, or into the per-page shadow
+ * buffer when the WQE wraps a page boundary (caller commits it later
+ * via hinic_write_wqe()). Returns NULL when the queue is full.
+ */
+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx)
+{
+ u32 curr_pg, end_pg;
+ u16 curr_prod_idx, end_prod_idx;
+
+ if (atomic_sub_return(num_wqebbs, &wq->delta) < 0) {
+ atomic_add(num_wqebbs, &wq->delta);
+ return NULL;
+ }
+
+ /* use original cur_pi and end_pi, no need queue depth mask as
+ * WQE_PAGE_NUM will do num_queue_pages mask
+ */
+ curr_prod_idx = (u16)wq->prod_idx;
+ wq->prod_idx += num_wqebbs;
+
+ /* end prod index should point to the last wqebb of wqe,
+ * therefore minus 1
+ */
+ end_prod_idx = (u16)wq->prod_idx - 1;
+
+ curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
+ end_pg = WQE_PAGE_NUM(wq, end_prod_idx);
+
+ *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
+
+ /* If we only have one page, still need to get shadow wqe when
+ * wqe rolling-over page
+ */
+ if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
+ u32 offset = curr_pg * wq->max_wqe_size;
+ u8 *shadow_addr = wq->shadow_wqe + offset;
+
+ wq->shadow_idx[curr_pg] = *prod_idx;
+ return shadow_addr;
+ }
+
+ return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
+}
+
+/* release @num_wqebbs WQEBBs and advance the consumer index */
+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs)
+{
+ atomic_add(num_wqebbs, &wq->delta);
+ wq->cons_idx += num_wqebbs;
+}
+
+/* hinic_read_wqe - peek @num_wqebbs WQEBBs at the consumer index
+ * @cons_idx: out - masked consumer index of the first WQEBB
+ *
+ * Returns NULL when fewer than @num_wqebbs WQEBBs are outstanding;
+ * returns a shadow copy when the span crosses a queue page.
+ */
+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx)
+{
+ u32 curr_pg, end_pg;
+ u16 curr_cons_idx, end_cons_idx;
+
+ if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
+ return NULL;
+
+ curr_cons_idx = (u16)wq->cons_idx;
+
+ curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
+ end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);
+
+ curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
+ end_pg = WQE_PAGE_NUM(wq, end_cons_idx);
+
+ *cons_idx = curr_cons_idx;
+
+ /* page-crossing WQE must be linearized into the shadow buffer */
+ if (curr_pg != end_pg) {
+ u32 offset = curr_pg * wq->max_wqe_size;
+ u8 *shadow_addr = wq->shadow_wqe + offset;
+
+ copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
+
+ return shadow_addr;
+ }
+
+ return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
+}
+
+/* return true when @wqe points into the wq's shadow WQE buffer */
+static inline int wqe_shadow(struct hinic_wq *wq, const void *wqe)
+{
+ void *end_wqe_shadow_addr;
+ u32 wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;
+
+ end_wqe_shadow_addr = &wq->shadow_wqe[wqe_shadow_size];
+
+ return WQE_IN_RANGE(wqe, wq->shadow_wqe, end_wqe_shadow_addr);
+}
+
+/* hinic_write_wqe - commit a WQE back into the queue pages
+ * Only needed when @wqe came from the shadow buffer (page-crossing
+ * WQE); a direct page pointer was already written in place.
+ */
+void hinic_write_wqe(struct hinic_wq *wq, void *wqe, int num_wqebbs)
+{
+ u16 curr_pg;
+ u16 prod_idx;
+
+ if (wqe_shadow(wq, wqe)) {
+ curr_pg = WQE_SHADOW_PAGE(wq, wqe);
+ prod_idx = wq->shadow_idx[curr_pg];
+
+ copy_wqe_from_shadow(wq, wqe, num_wqebbs, prod_idx);
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_wq.h
new file mode 100644
index 000000000000..c2a408c3396d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_wq.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_WQ_H
+#define HINIC_WQ_H
+
+/* one allocatable (page, block) slot in the WQS pool */
+struct hinic_free_block {
+ u32 page_idx;
+ u32 block_idx;
+};
+
+/* one hardware work queue carved from the WQS pool */
+struct hinic_wq {
+ /* The addresses are 64 bit in the HW */
+ u64 block_paddr;
+ u64 *shadow_block_vaddr;
+ u64 *block_vaddr;
+
+ u32 wqebb_size;
+ u32 wq_page_size;
+ u16 q_depth;
+ u32 max_wqe_size;
+ u32 num_wqebbs_per_page;
+
+ /* performance: replace mul/div as shift;
+ * num_wqebbs_per_page must be power of 2
+ */
+ u32 wqebbs_per_page_shift;
+ u32 page_idx;
+ u32 block_idx;
+
+ u32 num_q_pages;
+
+ struct hinic_dma_addr_align *mem_align;
+
+ int cons_idx;
+ int prod_idx;
+
+ /* count of free WQEBBs */
+ atomic_t delta;
+ u16 mask;
+
+ /* staging area for WQEs that cross a page boundary */
+ u8 *shadow_wqe;
+ u16 *shadow_idx;
+};
+
+/* the single DMA page shared by all command queues */
+struct hinic_cmdq_pages {
+ /* The addresses are 64 bit in the HW */
+ u64 cmdq_page_paddr;
+ u64 *cmdq_page_vaddr;
+ u64 *cmdq_shadow_page_vaddr;
+
+ void *dev_hdl;
+};
+
+/* pool of pages/blocks from which work queues are carved */
+struct hinic_wqs {
+ /* The addresses are 64 bit in the HW */
+ u64 *page_paddr;
+ u64 **page_vaddr;
+ u64 **shadow_page_vaddr;
+
+ struct hinic_free_block *free_blocks;
+ u32 alloc_blk_pos;
+ u32 return_blk_pos;
+ int num_free_blks;
+
+ /* for allocate blocks */
+ spinlock_t alloc_blocks_lock;
+
+ u32 num_pages;
+
+ void *dev_hdl;
+};
+
+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq);
+
+int hinic_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, void *dev_hdl,
+ int cmdq_blocks, u32 wq_page_size, u32 wqebb_size,
+ u16 q_depth, u32 max_wqe_size);
+
+void hinic_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
+ struct hinic_wq *wq, int cmdq_blocks);
+
+int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, void *dev_hdl);
+
+void hinic_wqs_free(struct hinic_wqs *wqs);
+
+int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
+ u32 wqebb_size, u32 wq_page_size, u16 q_depth,
+ u32 max_wqe_size);
+
+void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
+
+void *hinic_get_wqebb_addr(struct hinic_wq *wq, u16 index);
+
+u64 hinic_get_first_wqe_page_addr(struct hinic_wq *wq);
+
+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx);
+
+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs);
+
+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx);
+
+void hinic_write_wqe(struct hinic_wq *wq, void *wqe, int num_wqebbs);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl.h b/drivers/net/ethernet/huawei/hinic/ossl_knl.h
new file mode 100644
index 000000000000..45d53ae32f3a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/ossl_knl.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef OSSL_KNL_H
+#define OSSL_KNL_H
+
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/version.h>
+#include <linux/ethtool.h>
+#include <linux/fs.h>
+#include <linux/kthread.h>
+#include <net/checksum.h>
+#include <net/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/udp.h>
+#include <linux/highmem.h>
+
+/* [COMM]-tagged device log helpers for common/SDK code */
+#define sdk_err(dev, format, ...) \
+ dev_err(dev, "[COMM]" format, ##__VA_ARGS__)
+#define sdk_warn(dev, format, ...) \
+ dev_warn(dev, "[COMM]" format, ##__VA_ARGS__)
+#define sdk_notice(dev, format, ...) \
+ dev_notice(dev, "[COMM]" format, ##__VA_ARGS__)
+#define sdk_info(dev, format, ...) \
+ dev_info(dev, "[COMM]" format, ##__VA_ARGS__)
+
+/* [NIC]-tagged device log helpers */
+#define nic_err(dev, format, ...) \
+ dev_err(dev, "[NIC]" format, ##__VA_ARGS__)
+#define nic_warn(dev, format, ...) \
+ dev_warn(dev, "[NIC]" format, ##__VA_ARGS__)
+#define nic_notice(dev, format, ...) \
+ dev_notice(dev, "[NIC]" format, ##__VA_ARGS__)
+#define nic_info(dev, format, ...) \
+ dev_info(dev, "[NIC]" format, ##__VA_ARGS__)
+
+/* [NIC]-tagged helpers that honor the netif msg-level mask */
+#define nicif_err(priv, type, dev, fmt, args...) \
+ netif_level(err, priv, type, dev, "[NIC]" fmt, ##args)
+#define nicif_warn(priv, type, dev, fmt, args...) \
+ netif_level(warn, priv, type, dev, "[NIC]" fmt, ##args)
+#define nicif_notice(priv, type, dev, fmt, args...) \
+ netif_level(notice, priv, type, dev, "[NIC]" fmt, ##args)
+#define nicif_info(priv, type, dev, fmt, args...) \
+ netif_level(info, priv, type, dev, "[NIC]" fmt, ##args)
+#define nicif_dbg(priv, type, dev, fmt, args...) \
+ netif_level(dbg, priv, type, dev, "[NIC]" fmt, ##args)
+
+/* cross-kernel compatibility accessor for tasklet state */
+#define tasklet_state(tasklet) ((tasklet)->state)
+#endif /* OSSL_KNL_H */
--
2.24.0