From: zhuyikai <zhuyikai1@h-partners.com>
driver inclusion
category: Requirement
bugzilla: https://gitee.com/openeuler/kernel/issues/IB5BV0?from=project-issue
CVE: NA
--------------------------------
Add new cmdq ops.
Support new cmdq cqe format and new mode.
Change cmdq buffer size.
Change VAT RQ buffer size.
Add new RQ cqe format.
Add new RQ CI ctx.
Change tx wqe type and task section.
Signed-off-by: zhuyikai zhuyikai1@h-partners.com --- drivers/net/ethernet/huawei/hinic3/Makefile | 6 + drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.c | 144 +++++ drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.h | 55 ++ drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.c | 134 +++++ drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.h | 38 ++ drivers/net/ethernet/huawei/hinic3/comm_defs.h | 2 +- drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c | 1 - drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c | 3 - drivers/net/ethernet/huawei/hinic3/hinic3_cmdq_adapter.c | 12 + drivers/net/ethernet/huawei/hinic3/hinic3_crm.h | 2 +- drivers/net/ethernet/huawei/hinic3/hinic3_main.c | 10 + drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h | 22 +- drivers/net/ethernet/huawei/hinic3/hinic3_mt.h | 1 - drivers/net/ethernet/huawei/hinic3/hinic3_nic.h | 26 +- drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c | 49 +- drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h | 1 + drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg_vf.c | 24 +- drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmd.h | 2 + drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmdq.h | 85 +++ drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c | 4 +- drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h | 4 +- drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c | 350 +++++------ drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h | 58 +- drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h | 64 ++ drivers/net/ethernet/huawei/hinic3/hinic3_rss_cfg.c | 51 +- drivers/net/ethernet/huawei/hinic3/hinic3_rx.c | 199 ++++--- drivers/net/ethernet/huawei/hinic3/hinic3_rx.h | 4 + drivers/net/ethernet/huawei/hinic3/hinic3_tx.c | 2 +- drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c | 707 ++++++++++++++--------- drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h | 46 ++ drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq_enhance.c | 151 +++++ 
drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq_enhance.h | 170 ++++++ drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c | 18 + drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.h | 2 + drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c | 3 +- drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c | 35 +- drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h | 3 + drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h | 7 +- drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h | 96 +-- drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h | 2 + drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h | 29 +- drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h | 2 +- drivers/net/ethernet/huawei/hinic3/nic_cfg_comm.h | 4 +- 43 files changed, 1861 insertions(+), 767 deletions(-) create mode 100644 drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.c create mode 100644 drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.h create mode 100644 drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.c create mode 100644 drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.h create mode 100644 drivers/net/ethernet/huawei/hinic3/hinic3_cmdq_adapter.c create mode 100644 drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmdq.h create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq_enhance.c create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq_enhance.h
diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile index 7304b1f..6f9ed3f 100644 --- a/drivers/net/ethernet/huawei/hinic3/Makefile +++ b/drivers/net/ethernet/huawei/hinic3/Makefile @@ -2,6 +2,8 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/ ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/hw/ ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/cqm/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/ ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/ ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/cqm/ ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/public/ @@ -31,10 +33,14 @@ hinic3-objs := hw/hinic3_hwdev.o \ hw/hinic3_hw_mt.o \ hw/hinic3_nictool.o \ hw/hinic3_devlink.o \ + hw/hinic3_cmdq_enhance.o \ hw/ossl_knl_linux.o \ hw/hinic3_multi_host_mgmt.o \ + adapter/hw_cmdq/hw_cmdq_ops.o \ + adapter/sw_cmdq/sw_cmdq_ops.o \ bond/hinic3_bond.o \ hinic3_main.o \ + hinic3_cmdq_adapter.o \ hinic3_tx.o \ hinic3_rx.o \ hinic3_rss.o \ diff --git a/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.c b/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.c new file mode 100644 index 0000000..11634d9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "hinic3_nic_cmdq.h" +#include "hw_cmdq_ops.h" + +static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_header *qp_ctxt_hdr, + enum hinic3_qp_ctxt_type ctxt_type, u16 num_queues, + u16 q_id, u16 func_id) +{ + qp_ctxt_hdr->queue_type = ctxt_type; + qp_ctxt_hdr->num_queues = num_queues; + qp_ctxt_hdr->start_qid = q_id; + qp_ctxt_hdr->dest_func_id = func_id; + + hinic3_cpu_to_be32(qp_ctxt_hdr, 
sizeof(*qp_ctxt_hdr)); +} + +static u8 prepare_cmd_buf_qp_context_multi_store(struct hinic3_nic_io *nic_io, + struct hinic3_cmd_buf *cmd_buf, + enum hinic3_qp_ctxt_type ctxt_type, + u16 start_qid, u16 max_ctxts) +{ + struct hinic3_qp_ctxt_block *qp_ctxt_block = NULL; + u16 func_id; + u16 i; + + qp_ctxt_block = cmd_buf->buf; + func_id = hinic3_global_func_id(nic_io->hwdev); + hinic3_qp_prepare_cmdq_header(&qp_ctxt_block->cmdq_hdr, ctxt_type, + max_ctxts, start_qid, func_id); + + for (i = 0; i < max_ctxts; i++) { + if (ctxt_type == HINIC3_QP_CTXT_TYPE_RQ) + hinic3_rq_prepare_ctxt(&nic_io->rq[start_qid + i], + &qp_ctxt_block->rq_ctxt[i]); + else + hinic3_sq_prepare_ctxt(&nic_io->sq[start_qid + i], + start_qid + i, + &qp_ctxt_block->sq_ctxt[i]); + } + + return (u8)HINIC3_HTN_CMD_SQ_RQ_CONTEXT_MULTI_ST; +} + +static u8 prepare_cmd_buf_clean_tso_lro_space(struct hinic3_nic_io *nic_io, + struct hinic3_cmd_buf *cmd_buf, + enum hinic3_qp_ctxt_type ctxt_type) +{ + struct hinic3_clean_queue_ctxt *ctxt_block = NULL; + + ctxt_block = cmd_buf->buf; + ctxt_block->cmdq_hdr.dest_func_id = hinic3_global_func_id(nic_io->hwdev); + ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps; + ctxt_block->cmdq_hdr.queue_type = ctxt_type; + ctxt_block->cmdq_hdr.start_qid = 0; + + hinic3_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); + + cmd_buf->size = sizeof(*ctxt_block); + return (u8)HINIC3_HTN_CMD_TSO_LRO_SPACE_CLEAN; +} + +static void prepare_rss_indir_table_cmd_header(const struct hinic3_nic_io *nic_io, + const struct hinic3_cmd_buf *cmd_buf) +{ + struct hinic3_rss_cmd_header *header = cmd_buf->buf; + + header->dest_func_id = hinic3_global_func_id(nic_io->hwdev); + hinic3_cpu_to_be32(header, sizeof(*header)); +} + +static u8 prepare_cmd_buf_set_rss_indir_table(const struct hinic3_nic_io *nic_io, + const u32 *indir_table, + struct hinic3_cmd_buf *cmd_buf) +{ + u32 i; + u8 *indir_tbl = NULL; + + indir_tbl = (u8 *)cmd_buf->buf + sizeof(struct hinic3_rss_cmd_header); + cmd_buf->size = 
sizeof(struct hinic3_rss_cmd_header) + NIC_RSS_INDIR_SIZE; + memset(indir_tbl, 0, NIC_RSS_INDIR_SIZE); + + prepare_rss_indir_table_cmd_header(nic_io, cmd_buf); + + for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) + indir_tbl[i] = (u8)(*(indir_table + i)); + + hinic3_cpu_to_be32(indir_tbl, NIC_RSS_INDIR_SIZE); + + return (u8)HINIC3_HTN_CMD_SET_RSS_INDIR_TABLE; +} + +static u8 prepare_cmd_buf_get_rss_indir_table(const struct hinic3_nic_io *nic_io, + const struct hinic3_cmd_buf *cmd_buf) +{ + memset(cmd_buf->buf, 0, cmd_buf->size); + prepare_rss_indir_table_cmd_header(nic_io, cmd_buf); + + return (u8)HINIC3_HTN_CMD_GET_RSS_INDIR_TABLE; +} + +static void cmd_buf_to_rss_indir_table(const struct hinic3_cmd_buf *cmd_buf, u32 *indir_table) +{ + u32 i; + u8 *indir_tbl = NULL; + + indir_tbl = (u8 *)cmd_buf->buf; + hinic3_be32_to_cpu(cmd_buf->buf, NIC_RSS_INDIR_SIZE); + for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) + indir_table[i] = *(indir_tbl + i); +} + +static u8 prepare_cmd_buf_modify_svlan(struct hinic3_cmd_buf *cmd_buf, + u16 func_id, u16 vlan_tag, u16 q_id, u8 vlan_mode) +{ + struct hinic3_vlan_ctx *vlan_ctx = NULL; + + cmd_buf->size = sizeof(struct hinic3_vlan_ctx); + vlan_ctx = (struct hinic3_vlan_ctx *)cmd_buf->buf; + + vlan_ctx->dest_func_id = func_id; + vlan_ctx->start_qid = q_id; + vlan_ctx->vlan_tag = vlan_tag; + vlan_ctx->vlan_sel = 0; /* TPID0 in IPSU */ + vlan_ctx->vlan_mode = vlan_mode; + + hinic3_cpu_to_be32(vlan_ctx, sizeof(struct hinic3_vlan_ctx)); + return (u8)HINIC3_HTN_CMD_SVLAN_MODIFY; +} + +struct hinic3_nic_cmdq_ops *hinic3_nic_cmdq_get_hw_ops(void) +{ + static struct hinic3_nic_cmdq_ops cmdq_hw_ops = { + .prepare_cmd_buf_clean_tso_lro_space = prepare_cmd_buf_clean_tso_lro_space, + .prepare_cmd_buf_qp_context_multi_store = prepare_cmd_buf_qp_context_multi_store, + .prepare_cmd_buf_modify_svlan = prepare_cmd_buf_modify_svlan, + .prepare_cmd_buf_set_rss_indir_table = prepare_cmd_buf_set_rss_indir_table, + .prepare_cmd_buf_get_rss_indir_table = 
prepare_cmd_buf_get_rss_indir_table, + .cmd_buf_to_rss_indir_table = cmd_buf_to_rss_indir_table, + }; + + return &cmdq_hw_ops; +} diff --git a/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.h b/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.h new file mode 100644 index 0000000..b75f0dd --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef _HW_CMDQ_PRIVATE_H_ +#define _HW_CMDQ_PRIVATE_H_ + +#include "ossl_knl.h" +#include "hinic3_nic_cmdq.h" + +struct hinic3_qp_ctxt_header { + u32 rsvd[2]; + u16 num_queues; + u16 queue_type; + u16 start_qid; + u16 dest_func_id; +}; + +struct hinic3_clean_queue_ctxt { + struct hinic3_qp_ctxt_header cmdq_hdr; +}; + +struct hinic3_qp_ctxt_block { + struct hinic3_qp_ctxt_header cmdq_hdr; + union { + struct hinic3_sq_ctxt sq_ctxt[HINIC3_Q_CTXT_MAX]; + struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX]; + }; +}; + +struct hinic3_rss_cmd_header { + u32 rsv[3]; + u16 rsv1; + u16 dest_func_id; +}; + +/* NIC HTN CMD */ +enum hinic3_htn_cmd { + HINIC3_HTN_CMD_SQ_RQ_CONTEXT_MULTI_ST = 0x20, + HINIC3_HTN_CMD_SQ_RQ_CONTEXT_MULTI_LD, + HINIC3_HTN_CMD_TSO_LRO_SPACE_CLEAN, + HINIC3_HTN_CMD_SVLAN_MODIFY, + HINIC3_HTN_CMD_SET_RSS_INDIR_TABLE, + HINIC3_HTN_CMD_GET_RSS_INDIR_TABLE +}; + +struct hinic3_vlan_ctx { + u32 rsv[2]; + u16 vlan_tag; + u8 vlan_sel; + u8 vlan_mode; + u16 start_qid; + u16 dest_func_id; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.c b/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.c new file mode 100644 index 0000000..cc2e4b3 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "hinic3_nic_cmd.h" +#include "hinic3_nic_cmdq.h" +#include 
"sw_cmdq_ops.h" + +static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_header *qp_ctxt_hdr, + enum hinic3_qp_ctxt_type ctxt_type, u16 num_queues, + u16 q_id) +{ + qp_ctxt_hdr->queue_type = ctxt_type; + qp_ctxt_hdr->num_queues = num_queues; + qp_ctxt_hdr->start_qid = q_id; + qp_ctxt_hdr->rsvd = 0; + + hinic3_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); +} + +static u8 prepare_cmd_buf_qp_context_multi_store(struct hinic3_nic_io *nic_io, + struct hinic3_cmd_buf *cmd_buf, + enum hinic3_qp_ctxt_type ctxt_type, + u16 start_qid, u16 max_ctxts) +{ + struct hinic3_qp_ctxt_block *qp_ctxt_block = NULL; + u16 i; + + qp_ctxt_block = cmd_buf->buf; + + hinic3_qp_prepare_cmdq_header(&qp_ctxt_block->cmdq_hdr, ctxt_type, + max_ctxts, start_qid); + + for (i = 0; i < max_ctxts; i++) { + if (ctxt_type == HINIC3_QP_CTXT_TYPE_RQ) + hinic3_rq_prepare_ctxt(&nic_io->rq[start_qid + i], + &qp_ctxt_block->rq_ctxt[i]); + else + hinic3_sq_prepare_ctxt(&nic_io->sq[start_qid + i], start_qid + i, + &qp_ctxt_block->sq_ctxt[i]); + } + + return (u8)HINIC3_UCODE_CMD_MODIFY_QUEUE_CTX; +} + +static u8 prepare_cmd_buf_clean_tso_lro_space(struct hinic3_nic_io *nic_io, + struct hinic3_cmd_buf *cmd_buf, + enum hinic3_qp_ctxt_type ctxt_type) +{ + struct hinic3_clean_queue_ctxt *ctxt_block = NULL; + + ctxt_block = cmd_buf->buf; + ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps; + ctxt_block->cmdq_hdr.queue_type = ctxt_type; + ctxt_block->cmdq_hdr.start_qid = 0; + + hinic3_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); + + cmd_buf->size = sizeof(*ctxt_block); + return (u8)HINIC3_UCODE_CMD_CLEAN_QUEUE_CONTEXT; +} + +static u8 prepare_cmd_buf_set_rss_indir_table(const struct hinic3_nic_io *nic_io, + const u32 *indir_table, + struct hinic3_cmd_buf *cmd_buf) +{ + u32 i, size; + u32 *temp = NULL; + struct nic_rss_indirect_tbl *indir_tbl = NULL; + + indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf; + cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); + memset(indir_tbl, 0, 
sizeof(*indir_tbl)); + + for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) + indir_tbl->entry[i] = (u16)(*(indir_table + i)); + + size = sizeof(indir_tbl->entry) / sizeof(u32); + temp = (u32 *)indir_tbl->entry; + for (i = 0; i < size; i++) + temp[i] = cpu_to_be32(temp[i]); + + return (u8)HINIC3_UCODE_CMD_SET_RSS_INDIR_TABLE; +} + +static u8 prepare_cmd_buf_get_rss_indir_table(const struct hinic3_nic_io *nic_io, + const struct hinic3_cmd_buf *cmd_buf) +{ + (void)nic_io; + memset(cmd_buf->buf, 0, cmd_buf->size); + + return (u8)HINIC3_UCODE_CMD_GET_RSS_INDIR_TABLE; +} + +static void cmd_buf_to_rss_indir_table(const struct hinic3_cmd_buf *cmd_buf, u32 *indir_table) +{ + u32 i; + u16 *indir_tbl = NULL; + + indir_tbl = (u16 *)cmd_buf->buf; + for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) + indir_table[i] = *(indir_tbl + i); +} + +static u8 prepare_cmd_buf_modify_svlan(struct hinic3_cmd_buf *cmd_buf, + u16 func_id, u16 vlan_tag, u16 q_id, u8 vlan_mode) +{ + struct nic_vlan_ctx *vlan_ctx = NULL; + + cmd_buf->size = sizeof(struct nic_vlan_ctx); + vlan_ctx = (struct nic_vlan_ctx *)cmd_buf->buf; + + vlan_ctx->func_id = func_id; + vlan_ctx->qid = q_id; + vlan_ctx->vlan_tag = vlan_tag; + vlan_ctx->vlan_sel = 0; /* TPID0 in IPSU */ + vlan_ctx->vlan_mode = vlan_mode; + + hinic3_cpu_to_be32(vlan_ctx, sizeof(struct nic_vlan_ctx)); + return (u8)HINIC3_UCODE_CMD_MODIFY_VLAN_CTX; +} + +struct hinic3_nic_cmdq_ops *hinic3_nic_cmdq_get_sw_ops(void) +{ + static struct hinic3_nic_cmdq_ops cmdq_sw_ops = { + .prepare_cmd_buf_clean_tso_lro_space = prepare_cmd_buf_clean_tso_lro_space, + .prepare_cmd_buf_qp_context_multi_store = prepare_cmd_buf_qp_context_multi_store, + .prepare_cmd_buf_modify_svlan = prepare_cmd_buf_modify_svlan, + .prepare_cmd_buf_set_rss_indir_table = prepare_cmd_buf_set_rss_indir_table, + .prepare_cmd_buf_get_rss_indir_table = prepare_cmd_buf_get_rss_indir_table, + .cmd_buf_to_rss_indir_table = cmd_buf_to_rss_indir_table, + }; + + return &cmdq_sw_ops; +} diff --git 
a/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.h b/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.h new file mode 100644 index 0000000..ea68b9f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef _SW_CMDQ_PRIVATE_H_ +#define _SW_CMDQ_PRIVATE_H_ + +#include "ossl_knl.h" +#include "hinic3_nic_cmdq.h" + +struct hinic3_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u16 start_qid; + u16 rsvd; +}; + +struct hinic3_clean_queue_ctxt { + struct hinic3_qp_ctxt_header cmdq_hdr; + u32 rsvd; +}; + +struct hinic3_qp_ctxt_block { + struct hinic3_qp_ctxt_header cmdq_hdr; + union { + struct hinic3_sq_ctxt sq_ctxt[HINIC3_Q_CTXT_MAX]; + struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX]; + }; +}; + +struct hinic3_vlan_ctx { + u32 func_id; + u32 qid; /* if qid = 0xFFFF, config current function all queue */ + u32 vlan_id; + u32 vlan_mode; + u32 vlan_sel; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/comm_defs.h b/drivers/net/ethernet/huawei/hinic3/comm_defs.h index 63ee8dc..c5b1913 100644 --- a/drivers/net/ethernet/huawei/hinic3/comm_defs.h +++ b/drivers/net/ethernet/huawei/hinic3/comm_defs.h @@ -23,7 +23,7 @@ enum hinic3_mod_type { HINIC3_MOD_RSVD1 = 6, HINIC3_MOD_CFGM = 7, /* Configuration module */ HINIC3_MOD_CQM = 8, - HINIC3_MOD_RSVD2 = 9, + HINIC3_MOD_VMSEC = 9, COMM_MOD_FC = 10, HINIC3_MOD_OVS = 11, HINIC3_MOD_DSW = 12, diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c index 76e128e..8c95032 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c @@ -1529,7 +1529,6 @@ static s32 cqm_cla_update(struct tag_cqm_handle *cqm_handle, else cmd.func_id = 0xffff;
- /* Normal mode is 1822 traditional mode and is configured on SMF0. */ /* Mode 0 is hashed to 4 SMF engines (excluding PPF) by func ID. */ if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c index f4844d5..d5fac94 100644 --- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c @@ -661,9 +661,6 @@ s32 cqm_cla_cache_invalid(struct tag_cqm_handle *cqm_handle, dma_addr_t pa, u32 cmd.gpa_l = CQM_ADDR_LW(pa); cmd.gpa_h = cla_gpa_h;
- /* The normal mode is the 1822 traditional mode and is all configured - * on SMF0. - */ /* Mode 0 is hashed to 4 SMF engines (excluding PPF) by func ID. */ if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq_adapter.c b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq_adapter.c new file mode 100644 index 0000000..4d1caab --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq_adapter.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "hinic3_nic_cmdq.h" + +void hinic3_nic_cmdq_adapt_init(struct hinic3_nic_io *nic_io) +{ + if (!HINIC3_SUPPORT_FEATURE(nic_io->hwdev, HTN_CMDQ)) + nic_io->cmdq_ops = hinic3_nic_cmdq_get_sw_ops(); + else + nic_io->cmdq_ops = hinic3_nic_cmdq_get_hw_ops(); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h index f269691..14d409b 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h @@ -8,7 +8,7 @@
#include "mpu_cmd_base_defs.h"
-#define HINIC3_DRV_VERSION "15.17.1.1" +#define HINIC3_DRV_VERSION "15.17.1.2" #define HINIC3_DRV_DESC "Intelligent Network Interface Card Driver" #define HIUDK_DRV_DESC "Intelligent Network Unified Driver"
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c index 8cffad0..439cbbd 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c @@ -391,6 +391,14 @@ static void hinic3_free_txrxqs(struct hinic3_nic_dev *nic_dev) hinic3_free_txqs(nic_dev->netdev); }
+static void hinic3_tx_rx_ops_init(struct hinic3_nic_dev *nic_dev) +{ + if (HINIC3_SUPPORT_RX_COMPACT_CQE(nic_dev->hwdev)) + nic_dev->tx_rx_ops.rx_get_cqe_info = hinic3_rx_get_compact_cqe_info; + else + nic_dev->tx_rx_ops.rx_get_cqe_info = hinic3_rx_get_cqe_info; +} + static void hinic3_sw_deinit(struct hinic3_nic_dev *nic_dev) { hinic3_free_txrxqs(nic_dev); @@ -482,6 +490,8 @@ static int hinic3_sw_init(struct hinic3_nic_dev *nic_dev) goto alloc_qps_err; }
+ hinic3_tx_rx_ops_init(nic_dev); + return 0;
alloc_qps_err: diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h index 522518d..cb873f4 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h @@ -43,9 +43,11 @@ enum nic_feature_cap { NIC_F_VF_MAC = BIT(15), NIC_F_RATE_LIMIT = BIT(16), NIC_F_RXQ_RECOVERY = BIT(17), + NIC_F_RX_COMPACT_CQE = BIT(20), + NIC_F_HTN_CMDQ = BIT(21), };
-#define NIC_F_ALL_MASK 0x3FFFF /* mask of all supported NIC feature bits */ +#define NIC_F_ALL_MASK 0x7FBFFFF /* mask of all supported NIC feature bits */
struct hinic3_mgmt_msg_head { u8 status; @@ -294,6 +296,24 @@ union sm_tbl_args { u32 args[4]; };
+struct hinic3_rq_cqe_ctx { + struct hinic3_mgmt_msg_head msg_head; + + u8 cqe_type; + u8 rq_id; + u8 threshold_cqe_num; + u8 rsvd1; + + u16 msix_entry_idx; + u16 rsvd2; + + u32 ci_addr_hi; + u32 ci_addr_lo; + + u16 timer_loop; + u16 rsvd3; +}; + #define DFX_SM_TBL_BUF_MAX (768)
struct nic_cmd_dfx_sm_table { diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h index 774193a..507f569 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h @@ -286,7 +286,6 @@ enum hinic3_fault_type { };
struct fault_event_stats { - /* TODO :HINIC_NODE_ID_MAX: temp use the value of 1822(22) */ atomic_t chip_fault_stats[22][FAULT_LEVEL_MAX]; atomic_t fault_type_stat[FAULT_TYPE_MAX]; atomic_t pcie_fault_stats; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h index cc00bdc..11c1731 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h @@ -21,6 +21,9 @@ #define ARRAY_INDEX_6 6 #define ARRAY_INDEX_7 7
+#define SQ_CI_ADDR_SHIFT 2 +#define RQ_CI_ADDR_SHIFT 4 + struct hinic3_sq_attr { u8 dma_attr_off; u8 pending_limit; @@ -31,6 +34,16 @@ struct hinic3_sq_attr { u64 ci_dma_base; };
+struct hinic3_rq_attr { + u8 cqe_type; + u8 pending_limit; + u8 coalescing_time; + u8 rsv; + u16 intr_idx; + u32 l2nic_rqn; + u64 ci_dma_base; +}; + struct vf_data_storage { u8 drv_mac_addr[ETH_ALEN]; u8 user_mac_addr[ETH_ALEN]; @@ -78,6 +91,7 @@ struct hinic3_nic_cfg { struct mutex sfp_mutex; /* mutex used for copy sfp info */ };
+struct hinic3_nic_cmdq_ops; struct hinic3_nic_io { void *hwdev; void *pcidev_hdl; @@ -93,8 +107,11 @@ struct hinic3_nic_io { u16 num_qps; u16 max_qps;
- void *ci_vaddr_base; - dma_addr_t ci_dma_base; + void *sq_ci_vaddr_base; + dma_addr_t sq_ci_dma_base; + + void *rq_ci_vaddr_base; + dma_addr_t rq_ci_dma_base;
u8 __iomem *sqs_db_addr; u8 __iomem *rqs_db_addr; @@ -112,6 +129,7 @@ struct hinic3_nic_io { u32 rsvd6; u64 feature_cap; u64 rsvd7; + struct hinic3_nic_cmdq_ops *cmdq_ops; };
struct vf_msg_handler { @@ -127,7 +145,9 @@ struct nic_event_handler { void *buf_out, u16 *out_size); };
-int hinic3_set_ci_table(void *hwdev, struct hinic3_sq_attr *attr); +int hinic3_set_sq_ci_ctx(struct hinic3_nic_io *nic_io, struct hinic3_sq_attr *attr); + +int hinic3_set_rq_ci_ctx(struct hinic3_nic_io *nic_io, struct hinic3_rq_attr *attr);
int l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c index 7e73314..747ac03 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c @@ -20,26 +20,22 @@ #include "hinic3_nic_io.h" #include "hinic3_srv_nic.h" #include "hinic3_nic.h" +#include "hinic3_nic_cmdq.h" #include "hinic3_nic_cmd.h" #include "hinic3_common.h" #include "hinic3_nic_cfg.h"
-int hinic3_set_ci_table(void *hwdev, struct hinic3_sq_attr *attr) +int hinic3_set_sq_ci_ctx(struct hinic3_nic_io *nic_io, struct hinic3_sq_attr *attr) { struct hinic3_cmd_cons_idx_attr cons_idx_attr; u16 out_size = sizeof(cons_idx_attr); - struct hinic3_nic_io *nic_io = NULL; int err;
- if (!hwdev || !attr) + if (!nic_io || !attr) return -EINVAL;
memset(&cons_idx_attr, 0, sizeof(cons_idx_attr)); - - nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); - - cons_idx_attr.func_idx = hinic3_global_func_id(hwdev); - + cons_idx_attr.func_idx = hinic3_global_func_id(nic_io->hwdev); cons_idx_attr.dma_attr_off = attr->dma_attr_off; cons_idx_attr.pending_limit = attr->pending_limit; cons_idx_attr.coalescing_time = attr->coalescing_time; @@ -50,13 +46,13 @@ int hinic3_set_ci_table(void *hwdev, struct hinic3_sq_attr *attr) }
cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; - cons_idx_attr.ci_addr = attr->ci_dma_base; + cons_idx_attr.ci_addr = attr->ci_dma_base >> SQ_CI_ADDR_SHIFT;
- err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_SQ_CI_ATTR_SET, + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC3_NIC_CMD_SQ_CI_ATTR_SET, &cons_idx_attr, sizeof(cons_idx_attr), &cons_idx_attr, &out_size); if (err || !out_size || cons_idx_attr.msg_head.status) { - sdk_err(nic_io->dev_hdl, + nic_err(nic_io->dev_hdl, "Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n", err, cons_idx_attr.msg_head.status, out_size); return -EFAULT; @@ -65,6 +61,36 @@ int hinic3_set_ci_table(void *hwdev, struct hinic3_sq_attr *attr) return 0; }
+int hinic3_set_rq_ci_ctx(struct hinic3_nic_io *nic_io, struct hinic3_rq_attr *attr) +{ + struct hinic3_rq_cqe_ctx cons_idx_ctx; + u16 out_size = sizeof(cons_idx_ctx); + int err; + + if (!nic_io || !attr) + return -EINVAL; + + memset(&cons_idx_ctx, 0, sizeof(cons_idx_ctx)); + cons_idx_ctx.cqe_type = attr->cqe_type; + cons_idx_ctx.rq_id = (u8)(attr->l2nic_rqn & 0xff); + cons_idx_ctx.timer_loop = attr->coalescing_time; + cons_idx_ctx.threshold_cqe_num = attr->pending_limit; + cons_idx_ctx.msix_entry_idx = attr->intr_idx; + cons_idx_ctx.ci_addr_hi = upper_32_bits(attr->ci_dma_base >> RQ_CI_ADDR_SHIFT); + cons_idx_ctx.ci_addr_lo = lower_32_bits(attr->ci_dma_base >> RQ_CI_ADDR_SHIFT); + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC3_NIC_CMD_SET_RQ_CI_CTX, + &cons_idx_ctx, sizeof(cons_idx_ctx), + &cons_idx_ctx, &out_size); + if (err || !out_size || cons_idx_ctx.msg_head.status) { + nic_err(nic_io->dev_hdl, "Set rq cqe ctx fail, qid: %d, err: %d, status: 0x%x, out_size: 0x%x", + attr->l2nic_rqn, err, cons_idx_ctx.msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + #define PF_SET_VF_MAC(hwdev, status) \ (hinic3_func_type(hwdev) == TYPE_VF && \ (status) == HINIC3_PF_SET_VF_ALREADY) @@ -1024,6 +1050,7 @@ int hinic3_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, }
sdk_info(dev_hdl, "nic features: 0x%llx\n", nic_io->feature_cap); + hinic3_nic_cmdq_adapt_init(nic_io);
err = hinic3_get_bios_pf_bw_limit(hwdev, &nic_io->nic_cfg.pf_bw_limit); if (err) { diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h index 6cf18d1..232ffda 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h @@ -98,6 +98,7 @@ u64 hinic3_get_feature_cap(void *hwdev); #define HINIC3_SUPPORT_ALLMULTI(hwdev) HINIC3_SUPPORT_FEATURE(hwdev, ALLMULTI) #define HINIC3_SUPPORT_VF_MAC(hwdev) HINIC3_SUPPORT_FEATURE(hwdev, VF_MAC) #define HINIC3_SUPPORT_RATE_LIMIT(hwdev) HINIC3_SUPPORT_FEATURE(hwdev, RATE_LIMIT) +#define HINIC3_SUPPORT_RX_COMPACT_CQE(hwdev) HINIC3_SUPPORT_FEATURE(hwdev, RX_COMPACT_CQE)
#define HINIC3_SUPPORT_RXQ_RECOVERY(hwdev) HINIC3_SUPPORT_FEATURE(hwdev, RXQ_RECOVERY)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg_vf.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg_vf.c index b46cf78..15d081b 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg_vf.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg_vf.c @@ -21,6 +21,7 @@ #include "hinic3_nic_cfg.h" #include "hinic3_srv_nic.h" #include "hinic3_nic.h" +#include "hinic3_nic_cmdq.h" #include "hinic3_nic_cmd.h"
/*lint -e806*/ @@ -41,37 +42,28 @@ enum { static int hinic3_set_vlan_ctx(struct hinic3_nic_io *nic_io, u16 func_id, u16 vlan_tag, u16 q_id, bool add) { - struct nic_vlan_ctx *vlan_ctx = NULL; struct hinic3_cmd_buf *cmd_buf = NULL; u64 out_param = 0; int err; + u8 cmd, vlan_mode;
cmd_buf = hinic3_alloc_cmd_buf(nic_io->hwdev); - if (!cmd_buf) { + if (cmd_buf == NULL) { nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); return -ENOMEM; }
- cmd_buf->size = sizeof(struct nic_vlan_ctx); - vlan_ctx = (struct nic_vlan_ctx *)cmd_buf->buf; + vlan_mode = add ? NIC_QINQ_INSERT_ENABLE : NIC_CVLAN_INSERT_ENABLE;
- vlan_ctx->func_id = func_id; - vlan_ctx->qid = q_id; - vlan_ctx->vlan_tag = vlan_tag; - vlan_ctx->vlan_sel = 0; /* TPID0 in IPSU */ - vlan_ctx->vlan_mode = add ? - NIC_QINQ_INSERT_ENABLE : NIC_CVLAN_INSERT_ENABLE; - - hinic3_cpu_to_be32(vlan_ctx, sizeof(struct nic_vlan_ctx)); + cmd = nic_io->cmdq_ops->prepare_cmd_buf_modify_svlan(cmd_buf, func_id, + vlan_tag, q_id, vlan_mode);
err = hinic3_cmdq_direct_resp(nic_io->hwdev, HINIC3_MOD_L2NIC, - HINIC3_UCODE_CMD_MODIFY_VLAN_CTX, - cmd_buf, &out_param, 0, - HINIC3_CHANNEL_NIC); + cmd, cmd_buf, &out_param, 0, HINIC3_CHANNEL_NIC);
hinic3_free_cmd_buf(nic_io->hwdev, cmd_buf);
- if (err || out_param != 0) { + if ((err != 0) || out_param != 0) { nic_err(nic_io->dev_hdl, "Failed to set vlan context, err: %d, out_param: 0x%llx\n", err, out_param); return -EFAULT; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmd.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmd.h index 31e224a..c35bc82 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmd.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmd.h @@ -34,6 +34,8 @@ enum hinic3_nic_cmd {
HINIC3_NIC_CMD_CACHE_OUT_QP_RES,
+ HINIC3_NIC_CMD_SET_RQ_CI_CTX, + /* MAC & VLAN CFG */ HINIC3_NIC_CMD_GET_MAC = 20, HINIC3_NIC_CMD_SET_MAC, diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmdq.h new file mode 100644 index 0000000..461768d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmdq.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_NIC_CMDQ_H +#define HINIC3_NIC_CMDQ_H + +#include "ossl_knl.h" +#include "hinic3_hw.h" +#include "hinic3_nic.h" + +#define HINIC3_Q_CTXT_MAX 31U /* (2048 - 8) / 64 */ +#define HINIC3_QP_CTXT_HEADER_SIZE 16U + +enum hinic3_qp_ctxt_type { + HINIC3_QP_CTXT_TYPE_SQ, + HINIC3_QP_CTXT_TYPE_RQ, +}; + +struct hinic3_nic_cmdq_ops { + u8 (*prepare_cmd_buf_clean_tso_lro_space)(struct hinic3_nic_io *nic_io, + struct hinic3_cmd_buf *cmd_buf, + enum hinic3_qp_ctxt_type ctxt_type); + u8 (*prepare_cmd_buf_qp_context_multi_store)(struct hinic3_nic_io *nic_io, + struct hinic3_cmd_buf *cmd_buf, + enum hinic3_qp_ctxt_type ctxt_type, + u16 start_qid, u16 max_ctxts); + u8 (*prepare_cmd_buf_modify_svlan)(struct hinic3_cmd_buf *cmd_buf, + u16 func_id, u16 vlan_tag, u16 q_id, u8 vlan_mode); + u8 (*prepare_cmd_buf_set_rss_indir_table)(const struct hinic3_nic_io *nic_io, + const u32 *indir_table, + struct hinic3_cmd_buf *cmd_buf); + u8 (*prepare_cmd_buf_get_rss_indir_table)(const struct hinic3_nic_io *nic_io, + const struct hinic3_cmd_buf *cmd_buf); + void (*cmd_buf_to_rss_indir_table)(const struct hinic3_cmd_buf *cmd_buf, u32 *indir_table); +}; + +struct hinic3_sq_ctxt { + u32 ci_pi; + u32 drop_mode_sp; + u32 wq_pfn_hi_owner; + u32 wq_pfn_lo; + + u32 rsvd0; + u32 pkt_drop_thd; + u32 global_sq_id; + u32 vlan_ceq_attr; + + u32 pref_cache; + u32 pref_ci_owner; + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 rsvd8; + u32 rsvd9; + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct hinic3_rq_ctxt { + u32 ci_pi; + u32 
ceq_attr; + u32 wq_pfn_hi_type_owner; + u32 wq_pfn_lo; + + u32 rsvd[3]; + u32 cqe_sge_len; + + u32 pref_cache; + u32 pref_ci_owner; + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct hinic3_nic_cmdq_ops *hinic3_nic_cmdq_get_sw_ops(void); +struct hinic3_nic_cmdq_ops *hinic3_nic_cmdq_get_hw_ops(void); + +void hinic3_nic_cmdq_adapt_init(struct hinic3_nic_io *nic_io); +void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id, struct hinic3_sq_ctxt *sq_ctxt); +void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq, struct hinic3_rq_ctxt *rq_ctxt); +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c index 17d48c4..0ec4b32 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c @@ -90,7 +90,7 @@ int hinic3_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, sq_info->q_depth = sq->wq.q_depth; sq_info->wqebb_size = sq->wq.wqebb_size;
- sq_info->ci_addr = sq->tx.cons_idx_addr; + sq_info->ci_addr = sq->cons_idx_addr;
sq_info->cla_addr = sq->wq.wq_block_paddr; sq_info->slq_handle = sq; @@ -128,8 +128,6 @@ int hinic3_dbg_get_rq_info(void *hwdev, u16 q_id, struct nic_rq_info *rq_info,
rq_info->q_id = q_id;
- rq_info->hw_pi = cpu_to_be16(*rq->rx.pi_virt_addr); - rq_info->wqebb_size = rq->wq.wqebb_size; rq_info->q_depth = (u16)rq->wq.q_depth;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h index 800fa73..16bed02 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h @@ -17,7 +17,7 @@ #include "hinic3_dcb.h"
#define HINIC3_NIC_DRV_NAME "hinic3" -#define HINIC3_NIC_DRV_VERSION "15.17.1.1" +#define HINIC3_NIC_DRV_VERSION HINIC3_DRV_VERSION
#define HINIC3_FUNC_IS_VF(hwdev) (hinic3_func_type(hwdev) == TYPE_VF)
@@ -283,6 +283,8 @@ struct hinic3_nic_dev { u32 rsvd9; u32 rxq_get_err_times; struct delayed_work rxq_check_work; + + struct hinic3_tx_rx_ops tx_rx_ops; };
#define hinic_msg(level, nic_dev, msglvl, format, arg...) \ diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c index 13314ac..4ac3507 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c @@ -18,6 +18,7 @@ #include "hinic3_nic.h" #include "hinic3_nic_cmd.h" #include "hinic3_nic_io.h" +#include "hinic3_nic_cmdq.h"
#define HINIC3_DEAULT_TX_CI_PENDING_LIMIT 1 #define HINIC3_DEAULT_TX_CI_COALESCING_TIME 1 @@ -60,83 +61,6 @@ MODULE_PARM_DESC(tx_drop_thd_off, "TX parameter drop_thd_off (default=0)"); #define WQ_PREFETCH_MIN 1 #define WQ_PREFETCH_THRESHOLD 256
-#define HINIC3_Q_CTXT_MAX 31 /* (2048 - 8) / 64 */ - -enum hinic3_qp_ctxt_type { - HINIC3_QP_CTXT_TYPE_SQ, - HINIC3_QP_CTXT_TYPE_RQ, -}; - -struct hinic3_qp_ctxt_header { - u16 num_queues; - u16 queue_type; - u16 start_qid; - u16 rsvd; -}; - -struct hinic3_sq_ctxt { - u32 ci_pi; - u32 drop_mode_sp; - u32 wq_pfn_hi_owner; - u32 wq_pfn_lo; - - u32 rsvd0; - u32 pkt_drop_thd; - u32 global_sq_id; - u32 vlan_ceq_attr; - - u32 pref_cache; - u32 pref_ci_owner; - u32 pref_wq_pfn_hi_ci; - u32 pref_wq_pfn_lo; - - u32 rsvd8; - u32 rsvd9; - u32 wq_block_pfn_hi; - u32 wq_block_pfn_lo; -}; - -struct hinic3_rq_ctxt { - u32 ci_pi; - u32 ceq_attr; - u32 wq_pfn_hi_type_owner; - u32 wq_pfn_lo; - - u32 rsvd[3]; - u32 cqe_sge_len; - - u32 pref_cache; - u32 pref_ci_owner; - u32 pref_wq_pfn_hi_ci; - u32 pref_wq_pfn_lo; - - u32 pi_paddr_hi; - u32 pi_paddr_lo; - u32 wq_block_pfn_hi; - u32 wq_block_pfn_lo; -}; - -struct hinic3_sq_ctxt_block { - struct hinic3_qp_ctxt_header cmdq_hdr; - struct hinic3_sq_ctxt sq_ctxt[HINIC3_Q_CTXT_MAX]; -}; - -struct hinic3_rq_ctxt_block { - struct hinic3_qp_ctxt_header cmdq_hdr; - struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX]; -}; - -struct hinic3_clean_queue_ctxt { - struct hinic3_qp_ctxt_header cmdq_hdr; - u32 rsvd; -}; - -#define SQ_CTXT_SIZE(num_sqs) ((u16)(sizeof(struct hinic3_qp_ctxt_header) \ - + (num_sqs) * sizeof(struct hinic3_sq_ctxt))) - -#define RQ_CTXT_SIZE(num_rqs) ((u16)(sizeof(struct hinic3_qp_ctxt_header) \ - + (num_rqs) * sizeof(struct hinic3_rq_ctxt))) - #define CI_IDX_HIGH_SHIFH 12
#define CI_HIGN_IDX(val) ((val) >> CI_IDX_HIGH_SHIFH) @@ -345,12 +269,23 @@ static void hinic3_destroy_sq(struct hinic3_nic_io *nic_io, struct hinic3_io_que hinic3_wq_destroy(&sq->wq); }
+int hinic3_get_rq_wqe_type(void *hwdev) +{ + /* + * rq_wqe_type is the configuration when the driver is installed, + * but it may not be the actual configuration. + */ + if (rq_wqe_type != HINIC3_NORMAL_RQ_WQE && rq_wqe_type != HINIC3_EXTEND_RQ_WQE) + return HINIC3_NORMAL_RQ_WQE; + return rq_wqe_type; +} + static int hinic3_create_rq(struct hinic3_nic_io *nic_io, struct hinic3_io_queue *rq, u16 q_id, u32 rq_depth, u16 rq_msix_idx) { int err;
- rq->wqe_type = rq_wqe_type; + rq->wqe_type = (u8)(hinic3_get_rq_wqe_type(nic_io->hwdev)); rq->q_id = q_id; rq->msix_entry_idx = rq_msix_idx;
@@ -362,23 +297,11 @@ static int hinic3_create_rq(struct hinic3_nic_io *nic_io, struct hinic3_io_queue return err; }
- rq->rx.pi_virt_addr = dma_zalloc_coherent(nic_io->dev_hdl, PAGE_SIZE, - &rq->rx.pi_dma_addr, - GFP_KERNEL); - if (!rq->rx.pi_virt_addr) { - hinic3_wq_destroy(&rq->wq); - nic_err(nic_io->dev_hdl, "Failed to allocate rq pi virt addr\n"); - return -ENOMEM; - } - return 0; }
static void hinic3_destroy_rq(struct hinic3_nic_io *nic_io, struct hinic3_io_queue *rq) { - dma_free_coherent(nic_io->dev_hdl, PAGE_SIZE, rq->rx.pi_virt_addr, - rq->rx.pi_dma_addr); - hinic3_wq_destroy(&rq->wq); }
@@ -429,7 +352,7 @@ int hinic3_init_nicio_res(void *hwdev) nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); if (!nic_io) { pr_err("Failed to get nic service adapter\n"); - return -EFAULT; + goto get_nic_io_fail; }
nic_io->max_qps = hinic3_func_max_qnum(hwdev); @@ -437,30 +360,50 @@ int hinic3_init_nicio_res(void *hwdev) err = hinic3_alloc_db_addr(hwdev, &db_base, NULL); if (err) { nic_err(nic_io->dev_hdl, "Failed to allocate doorbell for sqs\n"); - return -ENOMEM; + goto alloc_sq_db_fail; } nic_io->sqs_db_addr = (u8 *)db_base;
err = hinic3_alloc_db_addr(hwdev, &db_base, NULL); if (err) { - hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr, NULL); nic_err(nic_io->dev_hdl, "Failed to allocate doorbell for rqs\n"); - return -ENOMEM; + goto alloc_rq_db_fail; } nic_io->rqs_db_addr = (u8 *)db_base;
- nic_io->ci_vaddr_base = - dma_zalloc_coherent(nic_io->dev_hdl, - CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), - &nic_io->ci_dma_base, GFP_KERNEL); - if (!nic_io->ci_vaddr_base) { - hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr, NULL); - hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr, NULL); - nic_err(nic_io->dev_hdl, "Failed to allocate ci area\n"); - return -ENOMEM; + nic_io->sq_ci_vaddr_base = + dma_zalloc_coherent(nic_io->dev_hdl, CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), + &nic_io->sq_ci_dma_base, GFP_KERNEL); + if (!nic_io->sq_ci_vaddr_base) { + nic_err(nic_io->dev_hdl, "Failed to allocate sq ci area\n"); + goto alloc_tx_vaddr_base_fail; + } + + nic_io->rq_ci_vaddr_base = + dma_zalloc_coherent(nic_io->dev_hdl, CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), + &nic_io->rq_ci_dma_base, GFP_KERNEL); + if (!nic_io->rq_ci_vaddr_base) { + nic_err(nic_io->dev_hdl, "Failed to allocate rq ci area\n"); + goto alloc_rx_vaddr_base_fail; }
return 0; + +alloc_rx_vaddr_base_fail: + dma_free_coherent(nic_io->dev_hdl, CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), + nic_io->sq_ci_vaddr_base, nic_io->sq_ci_dma_base); + +alloc_tx_vaddr_base_fail: + hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr, NULL); + +alloc_rq_db_fail: + hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr, NULL); + +alloc_sq_db_fail: + return -ENOMEM; + +get_nic_io_fail: + return -EFAULT; }
void hinic3_deinit_nicio_res(void *hwdev) @@ -478,7 +421,10 @@ void hinic3_deinit_nicio_res(void *hwdev)
dma_free_coherent(nic_io->dev_hdl, CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), - nic_io->ci_vaddr_base, nic_io->ci_dma_base); + nic_io->sq_ci_vaddr_base, nic_io->sq_ci_dma_base); + dma_free_coherent(nic_io->dev_hdl, + CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), + nic_io->rq_ci_vaddr_base, nic_io->rq_ci_dma_base); /* free all doorbell */ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr, NULL); hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr, NULL); @@ -580,12 +526,13 @@ static void init_qps_info(struct hinic3_nic_io *nic_io, nic_io->sq = qp_params->sqs; nic_io->rq = qp_params->rqs; for (q_id = 0; q_id < nic_io->num_qps; q_id++) { - sqs[q_id].tx.cons_idx_addr = - HINIC3_CI_VADDR(nic_io->ci_vaddr_base, q_id); + sqs[q_id].cons_idx_addr = HINIC3_CI_VADDR(nic_io->sq_ci_vaddr_base, q_id); /* clear ci value */ - *(u16 *)sqs[q_id].tx.cons_idx_addr = 0; + *(u16 *)sqs[q_id].cons_idx_addr = 0; sqs[q_id].db_addr = nic_io->sqs_db_addr;
+ rqs[q_id].cons_idx_addr = HINIC3_CI_VADDR(nic_io->rq_ci_vaddr_base, q_id); + *(u32 *)rqs[q_id].cons_idx_addr = 0; /* The first num_qps doorbell is used by sq */ rqs[q_id].db_addr = nic_io->rqs_db_addr; } @@ -699,19 +646,7 @@ void *hinic3_get_nic_queue(void *hwdev, u16 q_id, enum hinic3_queue_type q_type) } EXPORT_SYMBOL(hinic3_get_nic_queue);
-static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_header *qp_ctxt_hdr, - enum hinic3_qp_ctxt_type ctxt_type, - u16 num_queues, u16 q_id) -{ - qp_ctxt_hdr->queue_type = ctxt_type; - qp_ctxt_hdr->num_queues = num_queues; - qp_ctxt_hdr->start_qid = q_id; - qp_ctxt_hdr->rsvd = 0; - - hinic3_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); -} - -static void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id, +void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id, struct hinic3_sq_ctxt *sq_ctxt) { u64 wq_page_addr; @@ -803,7 +738,7 @@ static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq, *wq_block_pfn_lo = lower_32_bits(wq_block_pfn); }
-static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq, struct hinic3_rq_ctxt *rq_ctxt) +void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq, struct hinic3_rq_ctxt *rq_ctxt) { u32 wq_page_pfn_hi, wq_page_pfn_lo; u32 wq_block_pfn_hi, wq_block_pfn_lo; @@ -861,8 +796,6 @@ static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq, struct hinic3_rq_
rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
- rq_ctxt->pi_paddr_hi = upper_32_bits(rq->rx.pi_dma_addr); - rq_ctxt->pi_paddr_lo = lower_32_bits(rq->rx.pi_dma_addr);
rq_ctxt->wq_block_pfn_hi = RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); @@ -872,48 +805,38 @@ static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq, struct hinic3_rq_ hinic3_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); }
+static inline u16 hinic3_get_max_ctxts(u16 num_qps, u16 cmd_buf_size) +{ + u16 max_ctxts = (cmd_buf_size - HINIC3_QP_CTXT_HEADER_SIZE) / sizeof(struct hinic3_rq_ctxt); + + max_ctxts = min((u16)HINIC3_Q_CTXT_MAX, max_ctxts); + return (u16)min(max_ctxts, num_qps); +} + static int init_sq_ctxts(struct hinic3_nic_io *nic_io) { - struct hinic3_sq_ctxt_block *sq_ctxt_block = NULL; - struct hinic3_sq_ctxt *sq_ctxt = NULL; struct hinic3_cmd_buf *cmd_buf = NULL; - struct hinic3_io_queue *sq = NULL; u64 out_param = 0; - u16 q_id, curr_id, max_ctxts, i; + u16 q_id, max_ctxts; int err = 0; + u8 cmd;
cmd_buf = hinic3_alloc_cmd_buf(nic_io->hwdev); - if (!cmd_buf) { + if (cmd_buf == NULL) { nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); return -ENOMEM; }
q_id = 0; while (q_id < nic_io->num_qps) { - sq_ctxt_block = cmd_buf->buf; - sq_ctxt = sq_ctxt_block->sq_ctxt; - - max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ? - HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id); - - hinic3_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr, - HINIC3_QP_CTXT_TYPE_SQ, max_ctxts, - q_id); + max_ctxts = hinic3_get_max_ctxts(nic_io->num_qps - q_id, cmd_buf->size);
- for (i = 0; i < max_ctxts; i++) { - curr_id = q_id + i; - sq = &nic_io->sq[curr_id]; - - hinic3_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]); - } - - cmd_buf->size = SQ_CTXT_SIZE(max_ctxts); + cmd = nic_io->cmdq_ops->prepare_cmd_buf_qp_context_multi_store(nic_io, cmd_buf, + HINIC3_QP_CTXT_TYPE_SQ, q_id, max_ctxts);
err = hinic3_cmdq_direct_resp(nic_io->hwdev, HINIC3_MOD_L2NIC, - HINIC3_UCODE_CMD_MODIFY_QUEUE_CTX, - cmd_buf, &out_param, 0, - HINIC3_CHANNEL_NIC); - if (err || out_param != 0) { + cmd, cmd_buf, &out_param, 0, HINIC3_CHANNEL_NIC); + if ((err != 0) || out_param != 0) { nic_err(nic_io->dev_hdl, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n", err, out_param);
@@ -931,46 +854,28 @@ static int init_sq_ctxts(struct hinic3_nic_io *nic_io)
static int init_rq_ctxts(struct hinic3_nic_io *nic_io) { - struct hinic3_rq_ctxt_block *rq_ctxt_block = NULL; - struct hinic3_rq_ctxt *rq_ctxt = NULL; struct hinic3_cmd_buf *cmd_buf = NULL; - struct hinic3_io_queue *rq = NULL; u64 out_param = 0; - u16 q_id, curr_id, max_ctxts, i; + u16 q_id, max_ctxts; + u8 cmd; int err = 0;
cmd_buf = hinic3_alloc_cmd_buf(nic_io->hwdev); - if (!cmd_buf) { + if (cmd_buf == NULL) { nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); return -ENOMEM; }
q_id = 0; while (q_id < nic_io->num_qps) { - rq_ctxt_block = cmd_buf->buf; - rq_ctxt = rq_ctxt_block->rq_ctxt; - - max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ? - HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id); - - hinic3_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr, - HINIC3_QP_CTXT_TYPE_RQ, max_ctxts, - q_id); - - for (i = 0; i < max_ctxts; i++) { - curr_id = q_id + i; - rq = &nic_io->rq[curr_id]; - - hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]); - } - - cmd_buf->size = RQ_CTXT_SIZE(max_ctxts); + max_ctxts = hinic3_get_max_ctxts(nic_io->num_qps - q_id, cmd_buf->size);
+ cmd = nic_io->cmdq_ops->prepare_cmd_buf_qp_context_multi_store(nic_io, cmd_buf, + HINIC3_QP_CTXT_TYPE_RQ, q_id, max_ctxts); err = hinic3_cmdq_direct_resp(nic_io->hwdev, HINIC3_MOD_L2NIC, - HINIC3_UCODE_CMD_MODIFY_QUEUE_CTX, - cmd_buf, &out_param, 0, + cmd, cmd_buf, &out_param, 0, HINIC3_CHANNEL_NIC); - if (err || out_param != 0) { + if ((err != 0) || out_param != 0) { nic_err(nic_io->dev_hdl, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n", err, out_param);
@@ -1004,31 +909,27 @@ static int init_qp_ctxts(struct hinic3_nic_io *nic_io) static int clean_queue_offload_ctxt(struct hinic3_nic_io *nic_io, enum hinic3_qp_ctxt_type ctxt_type) { - struct hinic3_clean_queue_ctxt *ctxt_block = NULL; struct hinic3_cmd_buf *cmd_buf = NULL; u64 out_param = 0; + u8 cmd; int err;
cmd_buf = hinic3_alloc_cmd_buf(nic_io->hwdev); - if (!cmd_buf) { + if (cmd_buf == NULL) { nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); return -ENOMEM; }
- ctxt_block = cmd_buf->buf; - ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps; - ctxt_block->cmdq_hdr.queue_type = ctxt_type; - ctxt_block->cmdq_hdr.start_qid = 0; - - hinic3_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); - - cmd_buf->size = sizeof(*ctxt_block); + if (nic_io->cmdq_ops) + cmd = nic_io->cmdq_ops->prepare_cmd_buf_clean_tso_lro_space(nic_io, cmd_buf, + ctxt_type); + else + return -ENOMEM;
err = hinic3_cmdq_direct_resp(nic_io->hwdev, HINIC3_MOD_L2NIC, - HINIC3_UCODE_CMD_CLEAN_QUEUE_CONTEXT, - cmd_buf, &out_param, 0, + cmd, cmd_buf, &out_param, 0, HINIC3_CHANNEL_NIC); - if ((err) || (out_param)) { + if ((err != 0) || (out_param != 0)) { nic_err(nic_io->dev_hdl, "Failed to clean queue offload ctxts, err: %d,out_param: 0x%llx\n", err, out_param);
@@ -1047,13 +948,60 @@ static int clean_qp_offload_ctxt(struct hinic3_nic_io *nic_io) clean_queue_offload_ctxt(nic_io, HINIC3_QP_CTXT_TYPE_RQ)); }
+static int init_sq_ci_ctxts(struct hinic3_nic_io *nic_io) +{ + struct hinic3_sq_attr sq_attr; + u16 q_id; + int err; + + for (q_id = 0; q_id < nic_io->num_qps; q_id++) { + sq_attr.ci_dma_base = + HINIC3_CI_PADDR(nic_io->sq_ci_dma_base, q_id); + sq_attr.pending_limit = tx_pending_limit; + sq_attr.coalescing_time = tx_coalescing_time; + sq_attr.intr_en = 1; + sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx; + sq_attr.l2nic_sqn = q_id; + sq_attr.dma_attr_off = 0; + err = hinic3_set_sq_ci_ctx(nic_io, &sq_attr); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to set sq ci context\n"); + return -EFAULT; + } + } + + return 0; +} + +static int init_rq_ci_ctxts(struct hinic3_nic_io *nic_io) +{ + struct hinic3_rq_attr rq_attr; + u16 q_id; + int err; + + for (q_id = 0; q_id < nic_io->num_qps; q_id++) { + rq_attr.ci_dma_base = 0; + rq_attr.pending_limit = 0; + rq_attr.coalescing_time = 0; + rq_attr.intr_idx = nic_io->rq[q_id].msix_entry_idx; + rq_attr.l2nic_rqn = q_id; + rq_attr.cqe_type = 0; + + err = hinic3_set_rq_ci_ctx(nic_io, &rq_attr); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to set rq ci context\n"); + return -EFAULT; + } + } + + return 0; +} + /* init qps ctxt and set sq ci attr and arm all sq */ int hinic3_init_qp_ctxts(void *hwdev) { struct hinic3_nic_io *nic_io = NULL; - struct hinic3_sq_attr sq_attr; u32 rq_depth; - u16 q_id; int err;
if (!hwdev) @@ -1085,25 +1033,21 @@ int hinic3_init_qp_ctxts(void *hwdev) return err; }
- for (q_id = 0; q_id < nic_io->num_qps; q_id++) { - sq_attr.ci_dma_base = - HINIC3_CI_PADDR(nic_io->ci_dma_base, q_id) >> 0x2; - sq_attr.pending_limit = tx_pending_limit; - sq_attr.coalescing_time = tx_coalescing_time; - sq_attr.intr_en = 1; - sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx; - sq_attr.l2nic_sqn = q_id; - sq_attr.dma_attr_off = 0; - err = hinic3_set_ci_table(hwdev, &sq_attr); + err = init_sq_ci_ctxts(nic_io); + if (err) + goto clean_root_ctxt; + + if (HINIC3_SUPPORT_RX_COMPACT_CQE(hwdev)) { + /* init rxq cqe context */ + err = init_rq_ci_ctxts(nic_io); if (err) { - nic_err(nic_io->dev_hdl, "Failed to set ci table\n"); - goto set_cons_idx_table_err; + goto clean_root_ctxt; } }
return 0;
-set_cons_idx_table_err: +clean_root_ctxt: hinic3_clean_root_ctxt(hwdev, HINIC3_CHANNEL_NIC);
return err; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h index 5c5585a..3e04f6d 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h @@ -38,17 +38,7 @@ struct hinic3_io_queue { u16 msix_entry_idx;
u8 __iomem *db_addr; - - union { - struct { - void *cons_idx_addr; - } tx; - - struct { - u16 *pi_virt_addr; - dma_addr_t pi_dma_addr; - } rx; - }; + void *cons_idx_addr; } ____cacheline_aligned;
struct hinic3_nic_db { @@ -56,6 +46,23 @@ struct hinic3_nic_db { u32 pi_hi; };
+struct hinic3_tx_rx_ops { + void (*rx_get_cqe_info)(void *rx_cqe, void *cqe_info); + bool (*rx_cqe_done)(void *rxq, void **rx_cqe); +}; + +struct hinic3_rq_ci_wb { + union { + struct { + u16 cqe_num; + u16 hw_ci; + } bs; + u32 value; + } dw0; + + u32 rsvd[3]; +}; + #ifdef static #undef static #define LLT_STATIC_DEF_SAVED @@ -110,7 +117,23 @@ static inline u16 hinic3_get_sq_local_pi(const struct hinic3_io_queue *sq) static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq) { return WQ_MASK_IDX(&sq->wq, - hinic3_hw_cpu16(*(u16 *)sq->tx.cons_idx_addr)); + hinic3_hw_cpu16(*(u16 *)sq->cons_idx_addr)); +} + +/* * + * @brief hinic3_get_rq_hw_ci - get recv queue hardware consumer index + * @param rq: recv queue + * @retval : hardware consumer index + */ +static inline u16 hinic3_get_rq_hw_ci(const struct hinic3_io_queue *rq) +{ + u16 hw_ci; + u32 rq_ci_wb; + + rq_ci_wb = hinic3_hw_cpu32(*(u32 *)rq->cons_idx_addr); + hw_ci = ((struct hinic3_rq_ci_wb *) &rq_ci_wb)->dw0.bs.hw_ci; + + return WQ_MASK_IDX(&rq->wq, hw_ci); }
/* * @@ -213,17 +236,6 @@ static inline void *hinic3_rq_wqe_addr(struct hinic3_io_queue *rq, u16 idx) return hinic3_wq_wqebb_addr(&rq->wq, idx); }
-/* * - * @brief hinic3_update_rq_hw_pi - update receive queue hardware pi - * @param rq: receive queue - * @param pi: pi - */ -static inline void hinic3_update_rq_hw_pi(struct hinic3_io_queue *rq, u16 pi) -{ - *rq->rx.pi_virt_addr = cpu_to_be16((pi & rq->wq.idx_mask) << - rq->wqe_type); -} - /* * * @brief hinic3_update_rq_local_ci - update receive queue local consumer index * @param sq: receive queue diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h index 0401349..ddc3308 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h @@ -137,6 +137,50 @@ #define HINIC3_GET_ESP_NEXT_HEAD(decry_info) \ RQ_CQE_DECRY_INFO_GET(decry_info, ESP_NEXT_HEAD)
+/* compact cqe field */ +/* cqe dw0 */ +#define RQ_COMPACT_CQE_STATUS_RXDONE_SHIFT 31 +#define RQ_COMPACT_CQE_STATUS_CQE_TYPE_SHIFT 30 +#define RQ_COMPACT_CQE_STATUS_TS_FLAG_SHIFT 29 +#define RQ_COMPACT_CQE_STATUS_VLAN_EN_SHIFT 28 +#define RQ_COMPACT_CQE_STATUS_PKT_FORMAT_SHIFT 25 +#define RQ_COMPACT_CQE_STATUS_IP_TYPE_SHIFT 24 +#define RQ_COMPACT_CQE_STATUS_CQE_LEN_SHIFT 23 +#define RQ_COMPACT_CQE_STATUS_PKT_MC_SHIFT 21 +#define RQ_COMPACT_CQE_STATUS_CSUM_ERR_SHIFT 19 +#define RQ_COMPACT_CQE_STATUS_PKT_TYPE_SHIFT 16 +#define RQ_COMPACT_CQE_STATUS_PKT_LEN_SHIFT 0 + +#define RQ_COMPACT_CQE_STATUS_RXDONE_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_CQE_TYPE_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_TS_FLAG_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_VLAN_EN_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_PKT_FORMAT_MASK 0x7U +#define RQ_COMPACT_CQE_STATUS_IP_TYPE_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_PKT_MC_MASK 0x3U +#define RQ_COMPACT_CQE_STATUS_CQE_LEN_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_CSUM_ERR_MASK 0x3U +#define RQ_COMPACT_CQE_STATUS_PKT_TYPE_MASK 0x7U +#define RQ_COMPACT_CQE_STATUS_PKT_LEN_MASK 0xFFFFU + +#define RQ_COMPACT_CQE_STATUS_GET(val, member) \ + ((((val) >> RQ_COMPACT_CQE_STATUS_##member##_SHIFT) & \ + RQ_COMPACT_CQE_STATUS_##member##_MASK)) + +/* cqe dw2 */ +#define RQ_COMPACT_CQE_OFFLOAD_NUM_LRO_SHIFT 24 +#define RQ_COMPACT_CQE_OFFLOAD_VLAN_SHIFT 8 + +#define RQ_COMPACT_CQE_OFFLOAD_NUM_LRO_MASK 0xFFU +#define RQ_COMPACT_CQE_OFFLOAD_VLAN_MASK 0xFFFFU + +#define RQ_COMPACT_CQE_OFFLOAD_GET(val, member) \ + (((val) >> RQ_COMPACT_CQE_OFFLOAD_##member##_SHIFT) & \ + RQ_COMPACT_CQE_OFFLOAD_##member##_MASK) + +#define RQ_COMPACT_CQE_16BYTE 0 +#define RQ_COMPACT_CQE_8BYTE 1 + struct hinic3_rq_cqe { u32 status; u32 vlan_len; @@ -149,6 +193,26 @@ struct hinic3_rq_cqe { u32 pkt_info; };
+struct hinic3_cqe_info { + u8 lro_num; + u8 vlan_offload; + u8 pkt_fmt; + u8 ip_type; + + u8 pkt_type; + u8 cqe_len; + u8 cqe_type; + u8 ts_flag; + + u16 csum_err; + u16 vlan_tag; + + u16 pkt_len; + u16 rss_type; + + u32 rss_hash_value; +}; + struct hinic3_sge_sect { struct hinic3_sge sge; u32 rsvd; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rss_cfg.c index 175c4d6..071418d 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rss_cfg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss_cfg.c @@ -18,6 +18,7 @@ #include "hinic3_nic_cmd.h" #include "hinic3_hw.h" #include "hinic3_nic.h" +#include "hinic3_nic_cmdq.h" #include "hinic3_common.h"
static int hinic3_rss_cfg_hash_key(struct hinic3_nic_io *nic_io, u8 opcode, @@ -79,32 +80,31 @@ int hinic3_rss_get_indir_tbl(void *hwdev, u32 *indir_table) { struct hinic3_cmd_buf *cmd_buf = NULL; struct hinic3_nic_io *nic_io = NULL; - u16 *indir_tbl = NULL; - int err, i; + u8 cmd; + int err;
- if (!hwdev || !indir_table) + if ((hwdev == NULL) || (indir_table == NULL)) return -EINVAL;
nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (nic_io == NULL) + return -EINVAL; cmd_buf = hinic3_alloc_cmd_buf(hwdev); - if (!cmd_buf) { + if (cmd_buf == NULL) { nic_err(nic_io->dev_hdl, "Failed to allocate cmd_buf.\n"); return -ENOMEM; }
- cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); + cmd = nic_io->cmdq_ops->prepare_cmd_buf_get_rss_indir_table(nic_io, cmd_buf); err = hinic3_cmdq_detail_resp(hwdev, HINIC3_MOD_L2NIC, - HINIC3_UCODE_CMD_GET_RSS_INDIR_TABLE, - cmd_buf, cmd_buf, NULL, 0, + cmd, cmd_buf, cmd_buf, NULL, 0, HINIC3_CHANNEL_NIC); - if (err) { + if (err != 0) { nic_err(nic_io->dev_hdl, "Failed to get rss indir table\n"); goto get_indir_tbl_failed; }
- indir_tbl = (u16 *)cmd_buf->buf; - for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) - indir_table[i] = *(indir_tbl + i); + nic_io->cmdq_ops->cmd_buf_to_rss_indir_table(cmd_buf, indir_table);
get_indir_tbl_failed: hinic3_free_cmd_buf(hwdev, cmd_buf); @@ -114,41 +114,30 @@ get_indir_tbl_failed:
int hinic3_rss_set_indir_tbl(void *hwdev, const u32 *indir_table) { - struct nic_rss_indirect_tbl *indir_tbl = NULL; struct hinic3_cmd_buf *cmd_buf = NULL; struct hinic3_nic_io *nic_io = NULL; - u32 *temp = NULL; - u32 i, size; + u8 cmd; u64 out_param = 0; int err;
- if (!hwdev || !indir_table) + if ((hwdev == NULL) || (indir_table == NULL)) return -EINVAL;
nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC); + if (nic_io == NULL) + return -EINVAL; + cmd_buf = hinic3_alloc_cmd_buf(hwdev); - if (!cmd_buf) { + if (cmd_buf == NULL) { nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); return -ENOMEM; }
- cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); - indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf; - memset(indir_tbl, 0, sizeof(*indir_tbl)); - - for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) - indir_tbl->entry[i] = (u16)(*(indir_table + i)); - - size = sizeof(indir_tbl->entry) / sizeof(u32); - temp = (u32 *)indir_tbl->entry; - for (i = 0; i < size; i++) - temp[i] = cpu_to_be32(temp[i]); + cmd = nic_io->cmdq_ops->prepare_cmd_buf_set_rss_indir_table(nic_io, indir_table, cmd_buf);
err = hinic3_cmdq_direct_resp(hwdev, HINIC3_MOD_L2NIC, - HINIC3_UCODE_CMD_SET_RSS_INDIR_TABLE, - cmd_buf, &out_param, 0, - HINIC3_CHANNEL_NIC); - if (err || out_param != 0) { + cmd, cmd_buf, &out_param, 0, HINIC3_CHANNEL_NIC); + if ((err != 0) || (out_param != 0)) { nic_err(nic_io->dev_hdl, "Failed to set rss indir table\n"); err = -EFAULT; } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c index a7b121b..41c3d9e 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c @@ -32,10 +32,6 @@ #include "hinic3_rss.h" #include "hinic3_rx.h"
-static u32 rq_pi_rd_en; -module_param(rq_pi_rd_en, uint, 0644); -MODULE_PARM_DESC(rq_pi_rd_en, "Enable rq read pi from host, defaut update pi by doorbell (default=0)"); - /* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ #define HINIC3_RX_HDR_SIZE 256 #define HINIC3_RX_BUFFER_WRITE 16 @@ -51,6 +47,9 @@ MODULE_PARM_DESC(rq_pi_rd_en, "Enable rq read pi from host, defaut update pi by #define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0 #define HINIC3_RX_PKT_FORMAT_VXLAN 1
+#define HINIC3_RX_COMPACT_CSUM_OTHER_ERROR 2 +#define HINIC3_RX_COMPACT_HW_BYPASS_ERROR 3 + #define RXQ_STATS_INC(rxq, field) \ do { \ u64_stats_update_begin(&(rxq)->rxq_stats.syncp); \ @@ -164,18 +163,11 @@ static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq) }
if (likely(i)) { - if (!rq_pi_rd_en) { - hinic3_write_db(rxq->rq, - rxq->q_id & 3, - RQ_CFLAG_DP, - (u16)((u32)rxq->next_to_update << - rxq->rq->wqe_type)); - } else { - /* Write all the wqes before pi update */ - wmb(); - - hinic3_update_rq_hw_pi(rxq->rq, rxq->next_to_update); - } + hinic3_write_db(rxq->rq, + rxq->q_id & 3, + RQ_CFLAG_DP, + (u16)((u32)rxq->next_to_update << + rxq->rq->wqe_type)); rxq->delta -= i; rxq->next_to_alloc = rxq->next_to_update; } else if (free_wqebbs == rxq->q_depth - 1) { @@ -355,12 +347,13 @@ static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *head_skb, (((pkt_len) & ((rxq)->buf_len - 1)) ? 1 : 0)))
static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq, - u32 pkt_len) + const struct hinic3_cqe_info *cqe_info) { struct sk_buff *head_skb = NULL; struct sk_buff *cur_skb = NULL; struct sk_buff *skb = NULL; struct net_device *netdev = rxq->netdev; + u32 pkt_len = cqe_info->pkt_len; u8 sge_num, skb_num; u16 wqebb_cnt = 0;
@@ -603,40 +596,34 @@ static void hinic3_pull_tail(struct sk_buff *skb) skb->tail += pull_len; }
-static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type, - u32 status, struct sk_buff *skb) +static void hinic3_rx_csum(struct hinic3_rxq *rxq, const struct hinic3_cqe_info *cqe_info, + struct sk_buff *skb) { struct net_device *netdev = rxq->netdev; - u32 pkt_type = HINIC3_GET_RX_PKT_TYPE(offload_type); - u32 ip_type = HINIC3_GET_RX_IP_TYPE(offload_type); - u32 pkt_fmt = HINIC3_GET_RX_TUNNEL_PKT_FORMAT(offload_type);
- u32 csum_err; - - csum_err = HINIC3_GET_RX_CSUM_ERR(status); - if (unlikely(csum_err == HINIC3_RX_CSUM_IPSU_OTHER_ERR)) + if (unlikely(cqe_info->csum_err == HINIC3_RX_CSUM_IPSU_OTHER_ERR)) rxq->rxq_stats.other_errors++;
if (!(netdev->features & NETIF_F_RXCSUM)) return;
- if (unlikely(csum_err)) { + if (unlikely(cqe_info->csum_err)) { /* pkt type is recognized by HW, and csum is wrong */ - if (!(csum_err & (HINIC3_RX_CSUM_HW_CHECK_NONE | - HINIC3_RX_CSUM_IPSU_OTHER_ERR))) + if (!(cqe_info->csum_err & (HINIC3_RX_CSUM_HW_CHECK_NONE | + HINIC3_RX_CSUM_IPSU_OTHER_ERR))) rxq->rxq_stats.csum_errors++; skb->ip_summed = CHECKSUM_NONE; return; }
- if (ip_type == HINIC3_RX_INVALID_IP_TYPE || - !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL || - pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) { + if (cqe_info->ip_type == HINIC3_RX_INVALID_IP_TYPE || + !(cqe_info->pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL || + cqe_info->pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) { skb->ip_summed = CHECKSUM_NONE; return; }
- switch (pkt_type) { + switch (cqe_info->pkt_type) { case HINIC3_RX_TCP_PKT: case HINIC3_RX_UDP_PKT: case HINIC3_RX_SCTP_PKT: @@ -802,24 +789,21 @@ unlock_rcu: } #endif
-static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, - u32 pkt_len, u32 vlan_len, u32 status) +static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_cqe_info *cqe_info) { - struct sk_buff *skb; + struct sk_buff *skb = NULL; struct net_device *netdev = rxq->netdev; - u32 offload_type; - u16 num_lro; struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
#ifdef HAVE_XDP_SUPPORT u32 xdp_status;
- xdp_status = hinic3_run_xdp(rxq, pkt_len); + xdp_status = (u32)(hinic3_run_xdp(rxq, cqe_info->pkt_len)); if (xdp_status == HINIC3_XDP_PKT_DROP) return 0; #endif
- skb = hinic3_fetch_rx_buffer(rxq, pkt_len); + skb = hinic3_fetch_rx_buffer(rxq, cqe_info); if (unlikely(!skb)) { RXQ_STATS_INC(rxq, alloc_skb_err); return -ENOMEM; @@ -829,32 +813,26 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, if (skb_is_nonlinear(skb)) hinic3_pull_tail(skb);
- offload_type = hinic3_hw_cpu32(rx_cqe->offload_type); - hinic3_rx_csum(rxq, offload_type, status, skb); + hinic3_rx_csum(rxq, cqe_info, skb);
#ifdef HAVE_SKBUFF_CSUM_LEVEL - hinic3_rx_gro(rxq, offload_type, skb); + hinic3_rx_gro(rxq, cqe_info->pkt_fmt, skb); #endif
#if defined(NETIF_F_HW_VLAN_CTAG_RX) - if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - HINIC3_GET_RX_VLAN_OFFLOAD_EN(offload_type)) { + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && cqe_info->vlan_offload) { #else - if ((netdev->features & NETIF_F_HW_VLAN_RX) && - HINIC3_GET_RX_VLAN_OFFLOAD_EN(offload_type)) { + if ((netdev->features & NETIF_F_HW_VLAN_RX) && cqe_info->vlan_offload) { #endif - u16 vid = HINIC3_GET_RX_VLAN_TAG(vlan_len); - /* if the packet is a vlan pkt, the vid may be 0 */ - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), cqe_info->vlan_tag); }
if (unlikely(test_bit(HINIC3_LP_TEST, &nic_dev->flags))) hinic3_copy_lp_data(nic_dev, skb);
- num_lro = HINIC3_GET_RX_NUM_LRO(status); - if (num_lro) - hinic3_lro_set_gso_params(skb, num_lro); + if (cqe_info->lro_num) + hinic3_lro_set_gso_params(skb, cqe_info->lro_num);
skb_record_rx_queue(skb, rxq->q_id); skb->protocol = eth_type_trans(skb, netdev); @@ -879,40 +857,112 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, (HINIC3_GET_RX_IP_TYPE(hinic3_hw_cpu32((cqe)->offload_type)) == \ HINIC3_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
+void hinic3_rx_get_cqe_info(void *rx_cqe, void *cqe_info) +{ + struct hinic3_rq_cqe *cqe = (struct hinic3_rq_cqe *)rx_cqe; + struct hinic3_cqe_info *info = (struct hinic3_cqe_info *)cqe_info; + u32 dw0 = hinic3_hw_cpu32(cqe->status); + u32 dw1 = hinic3_hw_cpu32(cqe->vlan_len); + u32 dw2 = hinic3_hw_cpu32(cqe->offload_type); + u32 dw3 = hinic3_hw_cpu32(cqe->hash_val); + + info->lro_num = RQ_CQE_STATUS_GET(dw0, NUM_LRO); + info->csum_err = RQ_CQE_STATUS_GET(dw0, CSUM_ERR); + + info->pkt_len = RQ_CQE_SGE_GET(dw1, LEN); + info->vlan_tag = RQ_CQE_SGE_GET(dw1, VLAN); + + info->pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(dw2, PKT_TYPE); + info->ip_type = RQ_CQE_OFFOLAD_TYPE_GET(dw2, IP_TYPE); + info->pkt_fmt = RQ_CQE_OFFOLAD_TYPE_GET(dw2, TUNNEL_PKT_FORMAT); + info->vlan_offload = RQ_CQE_OFFOLAD_TYPE_GET(dw2, VLAN_EN); + info->rss_type = RQ_CQE_OFFOLAD_TYPE_GET(dw2, RSS_TYPE); + info->rss_hash_value = dw3; +} + +void hinic3_rx_get_compact_cqe_info(void *rx_cqe, void *cqe_info) +{ + struct hinic3_rq_cqe *cqe = (struct hinic3_rq_cqe *)rx_cqe; + struct hinic3_cqe_info *info = (struct hinic3_cqe_info *)cqe_info; + u32 dw0, dw1, dw2; + + dw0 = hinic3_hw_cpu32(cqe->status); + dw1 = hinic3_hw_cpu32(cqe->vlan_len); + dw2 = hinic3_hw_cpu32(cqe->offload_type); + + info->cqe_type = RQ_COMPACT_CQE_STATUS_GET(dw0, CQE_TYPE); + info->csum_err = RQ_COMPACT_CQE_STATUS_GET(dw0, CSUM_ERR); + info->vlan_offload = RQ_COMPACT_CQE_STATUS_GET(dw0, VLAN_EN); + info->pkt_fmt = RQ_COMPACT_CQE_STATUS_GET(dw0, PKT_FORMAT); + info->ip_type = RQ_COMPACT_CQE_STATUS_GET(dw0, IP_TYPE); + info->cqe_len = RQ_COMPACT_CQE_STATUS_GET(dw0, CQE_LEN); + info->pkt_type = RQ_COMPACT_CQE_STATUS_GET(dw0, PKT_TYPE); + info->pkt_len = RQ_COMPACT_CQE_STATUS_GET(dw0, PKT_LEN); + info->ts_flag = RQ_COMPACT_CQE_STATUS_GET(dw0, TS_FLAG); + info->rss_hash_value = dw1; + + switch (info->csum_err) { + case HINIC3_RX_COMPACT_CSUM_OTHER_ERROR: + info->csum_err = HINIC3_RX_CSUM_IPSU_OTHER_ERR; + break; + case 
HINIC3_RX_COMPACT_HW_BYPASS_ERROR: + info->csum_err = HINIC3_RX_CSUM_HW_CHECK_NONE; + break; + default: + break; + } + + if (info->cqe_len == RQ_COMPACT_CQE_16BYTE) { + info->lro_num = RQ_COMPACT_CQE_OFFLOAD_GET(dw2, NUM_LRO); + info->vlan_tag = RQ_COMPACT_CQE_OFFLOAD_GET(dw2, VLAN); + } +} + +static bool hinic3_rx_cqe_done(void *rx_queue, void **rx_cqe) +{ + u32 sw_ci, status = 0; + struct hinic3_rxq *rxq = rx_queue; + struct hinic3_rq_cqe *cqe = NULL; + + sw_ci = rxq->cons_idx & rxq->q_mask; + *rx_cqe = rxq->rx_info[sw_ci].cqe; + cqe = (struct hinic3_rq_cqe *) *rx_cqe; + + status = hinic3_hw_cpu32(cqe->status); + if (HINIC3_GET_RX_DONE(status) == 0) + return false; + + return true; +} + int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) { struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev); - u32 sw_ci, status, pkt_len, vlan_len, dropped = 0; + u32 dropped = 0; struct hinic3_rq_cqe *rx_cqe = NULL; + struct hinic3_cqe_info cqe_info = { 0 }; u64 rx_bytes = 0; - u16 num_lro; int pkts = 0, nr_pkts = 0; u16 num_wqe = 0;
while (likely(pkts < budget)) { - sw_ci = rxq->cons_idx & rxq->q_mask; - rx_cqe = rxq->rx_info[sw_ci].cqe; - status = hinic3_hw_cpu32(rx_cqe->status); - if (!HINIC3_GET_RX_DONE(status)) + if (!nic_dev->tx_rx_ops.rx_cqe_done(rxq, (void **)&rx_cqe)) break;
/* make sure we read rx_done before packet length */ rmb();
- vlan_len = hinic3_hw_cpu32(rx_cqe->vlan_len); - pkt_len = HINIC3_GET_RX_PKT_LEN(vlan_len); - if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status)) + nic_dev->tx_rx_ops.rx_get_cqe_info(rx_cqe, &cqe_info); + if (recv_one_pkt(rxq, &cqe_info)) break;
- rx_bytes += pkt_len; + rx_bytes += cqe_info.pkt_len; pkts++; nr_pkts++;
- num_lro = HINIC3_GET_RX_NUM_LRO(status); - if (num_lro) { - rx_bytes += ((num_lro - 1) * LRO_PKT_HDR_LEN(rx_cqe)); - - num_wqe += HINIC3_GET_SGE_NUM(pkt_len, rxq); + if (cqe_info.lro_num) { + rx_bytes += ((cqe_info.lro_num - 1) * LRO_PKT_HDR_LEN(rx_cqe)); + num_wqe += HINIC3_GET_SGE_NUM(cqe_info.pkt_len, rxq); }
rx_cqe->status = 0; @@ -941,6 +991,8 @@ int hinic3_alloc_rxqs_res(struct hinic3_nic_dev *nic_dev, u16 num_rq, u32 pkts; u64 size;
+ nic_dev->tx_rx_ops.rx_cqe_done = hinic3_rx_cqe_done; + for (idx = 0; idx < num_rq; idx++) { rqres = &rxqs_res[idx]; size = sizeof(*rqres->rx_info) * rq_depth; @@ -1237,15 +1289,8 @@ int rxq_restore(struct hinic3_nic_dev *nic_dev, u16 q_id, u16 hw_ci) return err; }
- if (!rq_pi_rd_en) { - hinic3_write_db(rxq->rq, rxq->q_id & (NIC_DCB_COS_MAX - 1), - RQ_CFLAG_DP, (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type)); - } else { - /* Write all the wqes before pi update */ - wmb(); - - hinic3_update_rq_hw_pi(rxq->rq, rxq->next_to_update); - } + hinic3_write_db(rxq->rq, rxq->q_id & (NIC_DCB_COS_MAX - 1), + RQ_CFLAG_DP, (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type));
return 0; } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h index f4d6f4f..9064177 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h @@ -150,6 +150,10 @@ void hinic3_rxq_get_stats(struct hinic3_rxq *rxq,
void hinic3_rxq_clean_stats(struct hinic3_rxq_stats *rxq_stats);
+void hinic3_rx_get_cqe_info(void *rx_cqe, void *cqe_info); + +void hinic3_rx_get_compact_cqe_info(void *rx_cqe, void *cqe_info); + void hinic3_rxq_check_work_handler(struct work_struct *work);
#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c index e30da63..7ea8375 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c @@ -532,7 +532,7 @@ static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq, }
return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, - num_sge + (u16)!!offload); + num_sge + (u16)!!offload); }
/* * diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c index ea41e36..b3ad47d 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c @@ -16,7 +16,7 @@ #include <linux/module.h>
#include "ossl_knl.h" -#include "npu_cmdq_base_defs.h" + #include "hinic3_crm.h" #include "hinic3_hw.h" #include "hinic3_hwdev.h" @@ -25,11 +25,11 @@ #include "hinic3_wq.h" #include "hinic3_hw_comm.h" #include "hinic3_hwif.h" +#include "npu_cmdq_base_defs.h" #include "hinic3_cmdq.h"
-#define HINIC3_CMDQ_BUF_SIZE 2048U - #define CMDQ_CMD_TIMEOUT 5000 /* millisecond */ +#define CMDQ_CMD_RETRY_TIMEOUT 1000U
#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) #define LOWER_8_BITS(data) ((data) & 0xFF) @@ -165,18 +165,17 @@ #define CMDQ_DB_ADDR(db_base, pi) \ (((u8 *)(db_base)) + CMDQ_DB_PI_OFF(pi))
-#define CMDQ_PFN_SHIFT 12 -#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) - #define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
#define WQE_LCMD_SIZE 64 #define WQE_SCMD_SIZE 64 +#define WQE_ENHANCED_CMDQ_SIZE 32
#define COMPLETE_LEN 3
#define CMDQ_WQEBB_SIZE 64 #define CMDQ_WQE_SIZE 64 +#define ENHANCE_CMDQ_WQEBB_SIZE 16
#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ struct hinic3_cmdqs, cmdq[0]) @@ -199,16 +198,6 @@ enum ctrl_sect_len { CTRL_DIRECT_SECT_LEN = 2, };
-enum bufdesc_len { - BUFDESC_LCMD_LEN = 2, - BUFDESC_SCMD_LEN = 3, -}; - -enum data_format { - DATA_SGE, - DATA_DIRECT, -}; - enum completion_format { COMPLETE_DIRECT, COMPLETE_SGE, @@ -218,13 +207,8 @@ enum completion_request { CEQ_SET = 1, };
-enum cmdq_cmd_type { - SYNC_CMD_DIRECT_RESP, - SYNC_CMD_SGE_RESP, - ASYNC_CMD, -}; - #define NUM_WQEBBS_FOR_CMDQ_WQE 1 +#define NUM_WQEBBS_FOR_ENHANCE_CMDQ_WQE 4
bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq) { @@ -239,12 +223,21 @@ static void *cmdq_read_wqe(struct hinic3_wq *wq, u16 *ci) return hinic3_wq_read_one_wqebb(wq, ci); }
-static void *cmdq_get_wqe(struct hinic3_wq *wq, u16 *pi) +static void *hinic3_wq_get_align_wqebbs(struct hinic3_wq *wq, u16 *pi, u16 wqebb_num) +{ + *pi = WQ_MASK_IDX(wq, wq->prod_idx); + wq->prod_idx += wqebb_num; + + return WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, *pi), + WQ_OFFSET_IN_PAGE(wq, *pi)); +} + +static void *cmdq_get_wqe(struct hinic3_wq *wq, u16 *pi, u16 wqebb_use_num) { - if (!hinic3_wq_free_wqebbs(wq)) + if (hinic3_wq_free_wqebbs(wq) < wqebb_use_num) return NULL;
- return hinic3_wq_get_one_wqebb(wq, pi); + return hinic3_wq_get_align_wqebbs(wq, pi, wqebb_use_num); }
struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(void *hwdev) @@ -260,10 +253,16 @@ struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(void *hwdev)
cmdqs = ((struct hinic3_hwdev *)hwdev)->cmdqs; dev = ((struct hinic3_hwdev *)hwdev)->dev_hdl; + if (cmdqs == NULL || dev == NULL) { + pr_err("Failed to alloc cmd buf, Invalid hwdev cmdqs or dev\n"); + return NULL; + }
cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC); - if (!cmd_buf) + if (!cmd_buf) { + sdk_err(dev, "Failed to allocate cmd buf\n"); return NULL; + }
cmd_buf->buf = dma_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, &cmd_buf->dma_addr); @@ -272,7 +271,7 @@ struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(void *hwdev) goto alloc_pci_buf_err; }
- cmd_buf->size = HINIC3_CMDQ_BUF_SIZE; + cmd_buf->size = (u16)cmdqs->cmd_buf_size; atomic_set(&cmd_buf->ref_cnt, 1);
return cmd_buf; @@ -307,8 +306,7 @@ static void cmdq_set_completion(struct hinic3_cmdq_completion *complete, { struct hinic3_sge_resp *sge_resp = &complete->sge_resp;
- hinic3_set_sge(&sge_resp->sge, buf_out->dma_addr, - HINIC3_CMDQ_BUF_SIZE); + hinic3_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size); }
static void cmdq_set_lcmd_bufdesc(struct hinic3_cmdq_wqe_lcmd *wqe, @@ -343,11 +341,11 @@ static void cmdq_set_db(struct hinic3_cmdq *cmdq, writeq(*((u64 *)&db), CMDQ_DB_ADDR(db_base, prod_idx)); }
-static void cmdq_wqe_fill(void *dst, const void *src) +static void cmdq_wqe_fill(void *dst, const void *src, int wqe_size) { memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST, (u8 *)src + FIRST_DATA_TO_WRITE_LAST, - CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); + wqe_size - FIRST_DATA_TO_WRITE_LAST);
wmb(); /* The first 8 bytes should be written last */
@@ -405,7 +403,7 @@ static void cmdq_prepare_wqe_ctrl(struct hinic3_cmdq_wqe *wqe, int wrapped, }
static void cmdq_set_lcmd_wqe(struct hinic3_cmdq_wqe *wqe, - enum cmdq_cmd_type cmd_type, + enum hinic3_cmdq_cmd_type cmd_type, struct hinic3_cmd_buf *buf_in, struct hinic3_cmd_buf *buf_out, int wrapped, u8 mod, u8 cmd, u16 prod_idx) @@ -414,20 +412,22 @@ static void cmdq_set_lcmd_wqe(struct hinic3_cmdq_wqe *wqe, enum completion_format complete_format = COMPLETE_DIRECT;
switch (cmd_type) { - case SYNC_CMD_DIRECT_RESP: + case HINIC3_CMD_TYPE_DIRECT_RESP: wqe_lcmd->completion.direct_resp = 0; break; - case SYNC_CMD_SGE_RESP: + case HINIC3_CMD_TYPE_SGE_RESP: if (buf_out) { complete_format = COMPLETE_SGE; cmdq_set_completion(&wqe_lcmd->completion, buf_out); } break; - case ASYNC_CMD: + case HINIC3_CMD_TYPE_ASYNC: wqe_lcmd->completion.direct_resp = 0; wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in); break; + default: + break; }
cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format, @@ -439,36 +439,75 @@ static void cmdq_set_lcmd_wqe(struct hinic3_cmdq_wqe *wqe, static void cmdq_update_cmd_status(struct hinic3_cmdq *cmdq, u16 prod_idx, struct hinic3_cmdq_wqe *wqe) { - struct hinic3_cmdq_cmd_info *cmd_info; - struct hinic3_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic3_cmdq_cmd_info *cmd_info = NULL; + struct hinic3_cmdq_wqe_lcmd *wqe_lcmd = NULL; u32 status_info; + u64 *direct_resp = NULL; + u32 error_status;
- wqe_lcmd = &wqe->wqe_lcmd; cmd_info = &cmdq->cmd_infos[prod_idx];
- if (cmd_info->errcode) { + if (!cmd_info->errcode) { + sdk_err(cmdq->hwdev->dev_hdl, "cmd_info->errcode = NULL\n"); + return; + } + + if (cmdq->hwdev->cmdq_mode == HINIC3_NORMAL_CMDQ) { + wqe_lcmd = &wqe->wqe_lcmd; status_info = hinic3_hw_cpu32(wqe_lcmd->status.status_info); *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL); - }
- if (cmd_info->direct_resp) - *cmd_info->direct_resp = - hinic3_hw_cpu32(wqe_lcmd->completion.direct_resp); + if (cmd_info->direct_resp) { + *cmd_info->direct_resp = hinic3_hw_cpu32(wqe_lcmd->completion.direct_resp); + if ((*cmd_info->errcode != 0) && (*cmd_info->direct_resp != 0)) { + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq resp err=0x%llx\n", + *cmd_info->direct_resp); + } + } + } else { + status_info = hinic3_hw_cpu32(wqe->enhanced_cmdq_wqe.completion.cs_format); + *cmd_info->errcode = ENHANCE_CMDQ_WQE_CS_GET(status_info, ERR_CODE); + if (*cmd_info->errcode != 0) { + error_status = + hinic3_hw_cpu32(wqe->enhanced_cmdq_wqe.completion.sge_resp_hi_addr); + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq error code 0x%x, error status 0x%x\n", + *cmd_info->errcode, error_status); + } + + if (cmd_info->direct_resp) { + direct_resp = (u64 *)(&wqe->enhanced_cmdq_wqe.completion.sge_resp_lo_addr); + *cmd_info->direct_resp = hinic3_hw_cpu32(*direct_resp); + if ((*cmd_info->errcode != 0) && (*cmd_info->direct_resp != 0)) { + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq resp err=0x%llx\n", + *cmd_info->direct_resp); + } + } + } }
static int hinic3_cmdq_sync_timeout_check(struct hinic3_cmdq *cmdq, struct hinic3_cmdq_wqe *wqe, u16 pi) { - struct hinic3_cmdq_wqe_lcmd *wqe_lcmd; - struct hinic3_ctrl *ctrl; + struct hinic3_cmdq_wqe_lcmd *wqe_lcmd = NULL; + struct hinic3_ctrl *ctrl = NULL; u32 ctrl_info;
- wqe_lcmd = &wqe->wqe_lcmd; - ctrl = &wqe_lcmd->ctrl; - ctrl_info = hinic3_hw_cpu32((ctrl)->ctrl_info); - if (!WQE_COMPLETED(ctrl_info)) { - sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check busy bit not set\n"); - return -EFAULT; + if (cmdq->hwdev->cmdq_mode == HINIC3_NORMAL_CMDQ) { + /* only arm bit is using scmd wqe, the wqe is lcmd */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = hinic3_hw_cpu32((ctrl)->ctrl_info); + + if (WQE_COMPLETED(ctrl_info) == 0) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check busy bit not set\n"); + return -EFAULT; + } + } else { + ctrl_info = hinic3_hw_cpu32(wqe->enhanced_cmdq_wqe.completion.cs_format); + if (ENHANCE_CMDQ_WQE_CS_GET(ctrl_info, HW_BUSY) == 0) { + sdk_info(cmdq->hwdev->dev_hdl, "enhance Cmdq sync command check busy bit not set\n"); + return -EFAULT; + } }
cmdq_update_cmd_status(cmdq, pi, wqe); @@ -490,6 +529,58 @@ static void clear_cmd_info(struct hinic3_cmdq_cmd_info *cmd_info, cmd_info->direct_resp = NULL; }
+static int wait_for_cmdq_timeout(struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_cmd_info *cmd_info, + ulong timeout) +{ + ulong timeo, end; + + if (cmdq->cmdqs->poll) { + end = jiffies + msecs_to_jiffies((unsigned int)timeout); + while (time_before(jiffies, end)) { + /* must lock cmdq when poll cqe handle */ + spin_lock_bh(&cmdq->cmdq_lock); + hinic3_cmdq_ceq_handler(cmdq->hwdev, 0); + spin_unlock_bh(&cmdq->cmdq_lock); + + if (try_wait_for_completion(cmd_info->done) != 0) + return 0; + + usleep_range(9, 10); /* sleep 9 us ~ 10 us */ + } + } else { + timeo = msecs_to_jiffies((unsigned int)timeout); + if (wait_for_completion_timeout(cmd_info->done, timeo) != 0) + return 0; + } + + return -ETIMEDOUT; +} + +static int cmdq_retry_get_ack(struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_cmd_info *cmd_info, u8 ceq_id) +{ + ulong retry_timeout = msecs_to_jiffies(CMDQ_CMD_RETRY_TIMEOUT); + int err; + + spin_lock_bh(&cmdq->cmdq_lock); + if (try_wait_for_completion(cmd_info->done)) { + spin_unlock_bh(&cmdq->cmdq_lock); + return 0; + } + reinit_completion(cmd_info->done); + spin_unlock_bh(&cmdq->cmdq_lock); + + err = hinic3_reschedule_eq(cmdq->hwdev, HINIC3_CEQ, ceq_id); + if (err != 0) + return err; + + if (wait_for_cmdq_timeout(cmdq, cmd_info, retry_timeout) == 0) + return 0; + + return -ETIMEDOUT; +} + static int cmdq_ceq_handler_status(struct hinic3_cmdq *cmdq, struct hinic3_cmdq_cmd_info *cmd_info, struct hinic3_cmdq_cmd_info *saved_cmd_info, @@ -497,20 +588,20 @@ static int cmdq_ceq_handler_status(struct hinic3_cmdq *cmdq, struct hinic3_cmdq_wqe *curr_wqe, u32 timeout) { - ulong timeo; int err; - ulong end = jiffies + msecs_to_jiffies(timeout);
- if (cmdq->hwdev->poll) { - while (time_before(jiffies, end)) { - hinic3_cmdq_ceq_handler(cmdq->hwdev, 0); - if (saved_cmd_info->done->done != 0) - return 0; - usleep_range(9, 10); /* sleep 9 us ~ 10 us */ - } - } else { - timeo = msecs_to_jiffies(timeout); - if (wait_for_completion_timeout(saved_cmd_info->done, timeo)) + err = wait_for_cmdq_timeout(cmdq, saved_cmd_info, timeout); + if (err == 0) + return 0; + + if (!cmdq->cmdqs->poll) { + sdk_warn(cmdq->hwdev->dev_hdl, + "Cmdq retry cmd(type %u, channel %u), msg_id %llu, pi %u\n", + saved_cmd_info->cmd_type, saved_cmd_info->channel, curr_msg_id, + curr_prod_idx); + + err = cmdq_retry_get_ack(cmdq, saved_cmd_info, HINIC3_CEQ_ID_CMDQ); + if (err == 0) return 0; }
@@ -528,7 +619,7 @@ static int cmdq_ceq_handler_status(struct hinic3_cmdq *cmdq, if (curr_msg_id == cmd_info->cmdq_msg_id) { err = hinic3_cmdq_sync_timeout_check(cmdq, curr_wqe, curr_prod_idx); - if (err) + if (err != 0) cmd_info->cmd_type = HINIC3_CMD_TYPE_TIMEOUT; else cmd_info->cmd_type = HINIC3_CMD_TYPE_FAKE_TIMEOUT; @@ -563,6 +654,8 @@ static int wait_cmdq_sync_cmd_completion(struct hinic3_cmdq *cmdq, static int cmdq_msg_lock(struct hinic3_cmdq *cmdq, u16 channel) { struct hinic3_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); + if (cmdqs == NULL) + return -EINVAL;
/* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ spin_lock_bh(&cmdq->cmdq_lock); @@ -593,6 +686,18 @@ static void cmdq_clear_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info, cmd_info->buf_out = NULL; }
+static void cmdq_update_next_prod_idx(struct hinic3_cmdq *cmdq, u16 curr_pi, u16 *next_pi, + u16 wqebb_use_num) +{ + u16 q_depth = (u16)cmdq->wq.q_depth; + + *next_pi = curr_pi + wqebb_use_num; + if (*next_pi >= q_depth) { + cmdq->wrapped = (cmdq->wrapped == 0) ? 1 : 0; + *next_pi -= (u16)q_depth; + } +} + static void cmdq_set_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info, struct hinic3_hwdev *hwdev, struct hinic3_cmd_buf *buf_in, @@ -608,170 +713,119 @@ static void cmdq_set_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info, atomic_inc(&buf_out->ref_cnt); }
-static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, - u8 cmd, struct hinic3_cmd_buf *buf_in, - u64 *out_param, u32 timeout, u16 channel) +static void cmdq_sync_wqe_prepare(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, struct hinic3_cmd_buf *buf_out, + struct hinic3_cmdq_wqe *curr_wqe, u16 curr_pi, + enum hinic3_cmdq_cmd_type nic_cmd_type) { - struct hinic3_wq *wq = &cmdq->wq; - struct hinic3_cmdq_wqe *curr_wqe = NULL, wqe; - struct hinic3_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; - struct completion done; - u16 curr_prod_idx, next_prod_idx; - int wrapped, errcode = 0, wqe_size = WQE_LCMD_SIZE; - int cmpt_code = CMDQ_SEND_CMPT_CODE; - u64 curr_msg_id; - int err; - u32 real_timeout; + struct hinic3_cmdq_wqe wqe; + struct hinic3_cmdq_cmd_param cmd_buf; + int wrapped, wqe_size;
- err = cmdq_msg_lock(cmdq, channel); - if (err) - return err; + if (cmdq->cmdqs->cmdq_mode == HINIC3_ENHANCE_CMDQ) { + wqe_size = WQE_ENHANCED_CMDQ_SIZE;
- curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); - if (!curr_wqe) { - cmdq_msg_unlock(cmdq); - return -EBUSY; + /* enhance cmdq wqe_size aligned with 64 */ + wqe_size = ALIGN(wqe_size, 64); + } else { + wqe_size = WQE_LCMD_SIZE; }
- memset(&wqe, 0, sizeof(wqe)); + memset(&wqe, 0, (u32)wqe_size);
wrapped = cmdq->wrapped;
- next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = (cmdq->wrapped == 0) ? 1 : 0; - next_prod_idx -= (u16)wq->q_depth; + if (cmdq->cmdqs->cmdq_mode == HINIC3_NORMAL_CMDQ) { + cmdq_set_lcmd_wqe(&wqe, nic_cmd_type, buf_in, buf_out, wrapped, mod, cmd, curr_pi); + } else { + cmd_buf.buf_in = buf_in; + cmd_buf.buf_out = buf_out; + cmd_buf.cmd = cmd; + cmd_buf.mod = mod; + enhanced_cmdq_set_wqe(&wqe, nic_cmd_type, &cmd_buf, wrapped); }
- cmd_info = &cmdq->cmd_infos[curr_prod_idx]; - - init_completion(&done); - - cmd_info->cmd_type = HINIC3_CMD_TYPE_DIRECT_RESP; - cmd_info->done = &done; - cmd_info->errcode = &errcode; - cmd_info->direct_resp = out_param; - cmd_info->cmpt_code = &cmpt_code; - cmd_info->channel = channel; - cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, NULL); - - memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); - - cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL, - wrapped, mod, cmd, curr_prod_idx); - /* The data that is written to HW should be in Big Endian Format */ hinic3_hw_be32_len(&wqe, wqe_size);
- /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ - cmdq_wqe_fill(curr_wqe, &wqe); - - (cmd_info->cmdq_msg_id)++; - curr_msg_id = cmd_info->cmdq_msg_id; - - cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx); - - cmdq_msg_unlock(cmdq); - - real_timeout = timeout ? timeout : CMDQ_CMD_TIMEOUT; - err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, - curr_msg_id, curr_prod_idx, - curr_wqe, real_timeout); - if (err) { - sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command(mod: %u, cmd: %u) timeout, prod idx: 0x%x\n", - mod, cmd, curr_prod_idx); - err = -ETIMEDOUT; - } - - if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { - sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", - mod, cmd); - err = -EAGAIN; - } + cmdq_wqe_fill(curr_wqe, &wqe, wqe_size); +}
- destroy_completion(&done); - smp_rmb(); /* read error code after completion */ +static inline void hinic3_cmdq_fill_cmd_info(struct hinic3_cmdq_cmd_info *cmd_info, + enum hinic3_cmdq_cmd_type nic_cmd_type, u16 channel, + u16 wqebb_use_num) +{ + cmd_info->cmd_type = nic_cmd_type; + cmd_info->channel = channel; + cmd_info->wqebb_use_num = wqebb_use_num; +}
- return (err != 0) ? err : errcode; +static inline void hinic3_cmdq_fill_completion_info(struct hinic3_cmdq_cmd_info *cmd_info, + int *cmpt_code, struct completion *done, + int *errcode, u64 *out_param) +{ + cmd_info->done = done; + cmd_info->errcode = errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = cmpt_code; }
-static int cmdq_sync_cmd_detail_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, - struct hinic3_cmd_buf *buf_in, - struct hinic3_cmd_buf *buf_out, - u64 *out_param, u32 timeout, u16 channel) +static int cmdq_sync_cmd(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, struct hinic3_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel, + enum hinic3_cmdq_cmd_type nic_cmd_type) { struct hinic3_wq *wq = &cmdq->wq; - struct hinic3_cmdq_wqe *curr_wqe = NULL, wqe; + struct hinic3_cmdq_wqe *curr_wqe = NULL; struct hinic3_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; struct completion done; - u16 curr_prod_idx, next_prod_idx; - int wrapped, errcode = 0, wqe_size = WQE_LCMD_SIZE; + u16 curr_pi, next_pi, wqebb_use_num; + int errcode = 0; int cmpt_code = CMDQ_SEND_CMPT_CODE; u64 curr_msg_id; int err; u32 real_timeout;
err = cmdq_msg_lock(cmdq, channel); - if (err) + if (err != 0) return err;
- curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); + wqebb_use_num = cmdq->cmdqs->wqebb_use_num; + curr_wqe = cmdq_get_wqe(wq, &curr_pi, wqebb_use_num); if (!curr_wqe) { cmdq_msg_unlock(cmdq); return -EBUSY; }
- memset(&wqe, 0, sizeof(wqe)); - - wrapped = cmdq->wrapped; - - next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = (cmdq->wrapped == 0) ? 1 : 0; - next_prod_idx -= (u16)wq->q_depth; - } - - cmd_info = &cmdq->cmd_infos[curr_prod_idx]; - init_completion(&done); + cmd_info = &cmdq->cmd_infos[curr_pi]; + hinic3_cmdq_fill_cmd_info(cmd_info, nic_cmd_type, channel, wqebb_use_num); + hinic3_cmdq_fill_completion_info(cmd_info, &cmpt_code, &done, &errcode, out_param);
- cmd_info->cmd_type = HINIC3_CMD_TYPE_SGE_RESP; - cmd_info->done = &done; - cmd_info->errcode = &errcode; - cmd_info->direct_resp = out_param; - cmd_info->cmpt_code = &cmpt_code; - cmd_info->channel = channel; cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, buf_out); - memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info));
- cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out, - wrapped, mod, cmd, curr_prod_idx); - - hinic3_hw_be32_len(&wqe, wqe_size); - - cmdq_wqe_fill(curr_wqe, &wqe); + cmdq_sync_wqe_prepare(cmdq, mod, cmd, buf_in, buf_out, curr_wqe, curr_pi, nic_cmd_type);
(cmd_info->cmdq_msg_id)++; curr_msg_id = cmd_info->cmdq_msg_id;
- cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); + cmdq_update_next_prod_idx(cmdq, curr_pi, &next_pi, wqebb_use_num); + cmdq_set_db(cmdq, cmdq->cmdq_type, next_pi);
cmdq_msg_unlock(cmdq);
- real_timeout = timeout ? timeout : CMDQ_CMD_TIMEOUT; + real_timeout = (timeout != 0) ? timeout : CMDQ_CMD_TIMEOUT; err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, - curr_msg_id, curr_prod_idx, - curr_wqe, real_timeout); - if (err) { - sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command(mod: %u, cmd: %u) timeout, prod idx: 0x%x\n", - mod, cmd, curr_prod_idx); + curr_msg_id, curr_pi, curr_wqe, real_timeout); + if (err != 0) { + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync cmd(mod: %u, cmd: %u) timeout, pi: 0x%x\n", + mod, cmd, curr_pi); err = -ETIMEDOUT; }
if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { - sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", - mod, cmd); + sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", mod, cmd); err = -EAGAIN; }
@@ -781,21 +835,44 @@ static int cmdq_sync_cmd_detail_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, return (err != 0) ? err : errcode; }
+static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, u64 *out_param, + u32 timeout, u16 channel) +{ + return cmdq_sync_cmd(cmdq, mod, cmd, buf_in, NULL, + out_param, timeout, channel, + HINIC3_CMD_TYPE_DIRECT_RESP); +} + +static int cmdq_sync_cmd_detail_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic3_cmd_buf *buf_in, + struct hinic3_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel) +{ + return cmdq_sync_cmd(cmdq, mod, cmd, buf_in, buf_out, + out_param, timeout, channel, + HINIC3_CMD_TYPE_SGE_RESP); +} + static int cmdq_async_cmd(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, struct hinic3_cmd_buf *buf_in, u16 channel) { struct hinic3_cmdq_cmd_info *cmd_info = NULL; struct hinic3_wq *wq = &cmdq->wq; - int wqe_size = WQE_LCMD_SIZE; - u16 curr_prod_idx, next_prod_idx; + int wqe_size; + u16 curr_prod_idx, next_prod_idx, wqebb_use_num; struct hinic3_cmdq_wqe *curr_wqe = NULL, wqe; int wrapped, err;
+ wqe_size = cmdq->cmdqs->cmdq_mode == HINIC3_NORMAL_CMDQ ? + WQE_LCMD_SIZE : WQE_ENHANCED_CMDQ_SIZE; + err = cmdq_msg_lock(cmdq, channel); - if (err) + if (err != 0) return err;
- curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); + wqebb_use_num = cmdq->cmdqs->wqebb_use_num; + curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx, wqebb_use_num); if (!curr_wqe) { cmdq_msg_unlock(cmdq); return -EBUSY; @@ -804,28 +881,26 @@ static int cmdq_async_cmd(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, memset(&wqe, 0, sizeof(wqe));
wrapped = cmdq->wrapped; - next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = (cmdq->wrapped == 0) ? 1 : 0; - next_prod_idx -= (u16)wq->q_depth; - }
- cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped, + cmdq_update_next_prod_idx(cmdq, curr_prod_idx, &next_prod_idx, wqebb_use_num); + + cmdq_set_lcmd_wqe(&wqe, HINIC3_CMD_TYPE_ASYNC, buf_in, NULL, wrapped, mod, cmd, curr_prod_idx);
/* The data that is written to HW should be in Big Endian Format */ hinic3_hw_be32_len(&wqe, wqe_size); - cmdq_wqe_fill(curr_wqe, &wqe); + cmdq_wqe_fill(curr_wqe, &wqe, wqe_size);
cmd_info = &cmdq->cmd_infos[curr_prod_idx]; cmd_info->cmd_type = HINIC3_CMD_TYPE_ASYNC; cmd_info->channel = channel; + cmd_info->wqebb_use_num = wqebb_use_num; /* The caller will not free the cmd_buf of the asynchronous command, * so there is no need to increase the reference count here */ cmd_info->buf_in = buf_in;
- cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); + cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx);
cmdq_msg_unlock(cmdq);
@@ -834,13 +909,17 @@ static int cmdq_async_cmd(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd,
static int cmdq_params_valid(const void *hwdev, const struct hinic3_cmd_buf *buf_in) { + struct hinic3_cmdqs *cmdqs = NULL; + if (!buf_in || !hwdev) { pr_err("Invalid CMDQ buffer addr or hwdev\n"); return -EINVAL; }
- if (!buf_in->size || buf_in->size > HINIC3_CMDQ_BUF_SIZE) { - pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size); + cmdqs = ((struct hinic3_hwdev *)hwdev)->cmdqs; + if (!cmdqs || (buf_in->size < HINIC3_CMDQ_MIN_BUF_SIZE) || + (buf_in->size > cmdqs->cmd_buf_size)) { + pr_err("Invalid cmdqs addr or CMDQ buffer size: 0x%x\n", buf_in->size); return -EINVAL; }
@@ -851,13 +930,15 @@ static int cmdq_params_valid(const void *hwdev, const struct hinic3_cmd_buf *buf static int wait_cmdqs_enable(struct hinic3_cmdqs *cmdqs) { unsigned long end; + if (cmdqs == NULL) + return -EINVAL;
end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT); do { if (cmdqs->status & HINIC3_CMDQ_ENABLE) return 0; } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag && - !cmdqs->disable_flag); + (cmdqs->disable_flag == 0));
cmdqs->disable_flag = 1;
@@ -872,7 +953,7 @@ int hinic3_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, int err;
err = cmdq_params_valid(hwdev, buf_in); - if (err) { + if (err != 0) { pr_err("Invalid CMDQ parameters\n"); return err; } @@ -882,7 +963,7 @@ int hinic3_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd,
cmdqs = ((struct hinic3_hwdev *)hwdev)->cmdqs; err = wait_cmdqs_enable(cmdqs); - if (err) { + if (err != 0) { sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); return err; } @@ -890,8 +971,12 @@ int hinic3_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC3_CMDQ_SYNC], mod, cmd, buf_in, out_param, timeout, channel); + if (err != 0) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq direct_resp fail\n"); + return err; + }
- if (!(((struct hinic3_hwdev *)hwdev)->chip_present_flag)) + if ((((struct hinic3_hwdev *)hwdev)->chip_present_flag) == 0) return -ETIMEDOUT; else return err; @@ -1026,27 +1111,35 @@ static void clear_wqe_complete_bit(struct hinic3_cmdq *cmdq, struct hinic3_cmdq_wqe *wqe, u16 ci) { struct hinic3_ctrl *ctrl = NULL; - u32 header_info = hinic3_hw_cpu32(WQE_HEADER(wqe)->header_info); - enum data_format df = CMDQ_WQE_HEADER_GET(header_info, DATA_FMT); + u32 header_info; + enum data_format df; + + if (cmdq->hwdev->cmdq_mode == HINIC3_NORMAL_CMDQ) { + header_info = hinic3_hw_cpu32(WQE_HEADER(wqe)->header_info); + df = CMDQ_WQE_HEADER_GET(header_info, DATA_FMT); + if (df == DATA_SGE) + ctrl = &wqe->wqe_lcmd.ctrl; + else + ctrl = &wqe->inline_wqe.wqe_scmd.ctrl;
- if (df == DATA_SGE) - ctrl = &wqe->wqe_lcmd.ctrl; - else - ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; + ctrl->ctrl_info = 0; /* clear HW busy bit */ + } else { + wqe->enhanced_cmdq_wqe.completion.cs_format = 0; /* clear HW busy bit */ + }
- /* clear HW busy bit */ - ctrl->ctrl_info = 0; cmdq->cmd_infos[ci].cmd_type = HINIC3_CMD_TYPE_NONE;
wmb(); /* verify wqe is clear */
- hinic3_wq_put_wqebbs(&cmdq->wq, NUM_WQEBBS_FOR_CMDQ_WQE); + hinic3_wq_put_wqebbs(&cmdq->wq, cmdq->cmd_infos[ci].wqebb_use_num); }
static void cmdq_sync_cmd_handler(struct hinic3_cmdq *cmdq, struct hinic3_cmdq_wqe *wqe, u16 ci) { - spin_lock(&cmdq->cmdq_lock); + /* cmdq already locked in poll mode */ + if (!cmdq->cmdqs->poll) + spin_lock(&cmdq->cmdq_lock);
cmdq_update_cmd_status(cmdq, ci, wqe);
@@ -1063,7 +1156,8 @@ static void cmdq_sync_cmd_handler(struct hinic3_cmdq *cmdq, cmdq->cmd_infos[ci].done = NULL; }
- spin_unlock(&cmdq->cmdq_lock); + if (!cmdq->cmdqs->poll) + spin_unlock(&cmdq->cmdq_lock);
cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev); clear_wqe_complete_bit(cmdq, wqe, ci); @@ -1077,20 +1171,6 @@ static void cmdq_async_cmd_handler(struct hinic3_hwdev *hwdev, clear_wqe_complete_bit(cmdq, wqe, ci); }
-static int cmdq_arm_ceq_handler(struct hinic3_cmdq *cmdq, - struct hinic3_cmdq_wqe *wqe, u16 ci) -{ - struct hinic3_ctrl *ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; - u32 ctrl_info = hinic3_hw_cpu32((ctrl)->ctrl_info); - - if (!WQE_COMPLETED(ctrl_info)) - return -EBUSY; - - clear_wqe_complete_bit(cmdq, wqe, ci); - - return 0; -} - #define HINIC3_CMDQ_WQE_HEAD_LEN 32 static void hinic3_dump_cmdq_wqe_head(struct hinic3_hwdev *hwdev, struct hinic3_cmdq_wqe *wqe) @@ -1105,59 +1185,83 @@ static void hinic3_dump_cmdq_wqe_head(struct hinic3_hwdev *hwdev, } }
+static int cmdq_type_default_ceq_handler(struct hinic3_hwdev *hwdev, + struct hinic3_cmdq_cmd_info *cmd_info, + struct hinic3_cmdq *cmdq, + struct hinic3_cmdq_wqe *wqe, u16 ci) +{ + struct hinic3_cmdq_wqe_lcmd *wqe_lcmd = NULL; + struct hinic3_ctrl *ctrl = NULL; + u32 ctrl_info; + + if (hwdev->cmdq_mode == HINIC3_NORMAL_CMDQ) { + /* only arm bit is using scmd wqe, the wqe is lcmd */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = hinic3_hw_cpu32((ctrl)->ctrl_info); + + if (WQE_COMPLETED(ctrl_info) == 0) + return -EBUSY; + } else { + ctrl_info = wqe->enhanced_cmdq_wqe.completion.cs_format; + ctrl_info = hinic3_hw_cpu32(ctrl_info); + if (ENHANCE_CMDQ_WQE_CS_GET(ctrl_info, HW_BUSY) == 0) + return -EBUSY; + } + dma_rmb(); + /* + * For FORCE_STOP cmd_type, we also need to wait for + * the firmware processing to complete to prevent the + * firmware from accessing the released cmd_buf + */ + if (cmd_info->cmd_type == HINIC3_CMD_TYPE_FORCE_STOP) { + cmdq_clear_cmd_buf(cmd_info, hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); + } else if (cmd_info->cmd_type == HINIC3_CMD_TYPE_ASYNC) { + cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci); + } else { + cmdq_sync_cmd_handler(cmdq, wqe, ci); + } + + return 0; +} + void hinic3_cmdq_ceq_handler(void *handle, u32 ceqe_data) { struct hinic3_cmdqs *cmdqs = ((struct hinic3_hwdev *)handle)->cmdqs; enum hinic3_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE); - struct hinic3_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; + struct hinic3_cmdq *cmdq = NULL; struct hinic3_hwdev *hwdev = cmdqs->hwdev; struct hinic3_cmdq_wqe *wqe = NULL; - struct hinic3_cmdq_wqe_lcmd *wqe_lcmd = NULL; - struct hinic3_ctrl *ctrl = NULL; struct hinic3_cmdq_cmd_info *cmd_info = NULL; u16 ci; + int err; + + if (cmdq_type >= HINIC3_MAX_CMDQ_TYPES) { + sdk_err(hwdev->dev_hdl, "Cmdq type invalid, type: %u\n", cmdq_type); + return; + } + cmdq = &cmdqs->cmdq[cmdq_type];
while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) { cmd_info = &cmdq->cmd_infos[ci]; - switch (cmd_info->cmd_type) { case HINIC3_CMD_TYPE_NONE: return; case HINIC3_CMD_TYPE_TIMEOUT: - sdk_warn(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n", - cmdq_type, ci); + sdk_warn(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n", cmdq_type, ci); hinic3_dump_cmdq_wqe_head(hwdev, wqe); - fallthrough; - case HINIC3_CMD_TYPE_FAKE_TIMEOUT: cmdq_clear_cmd_buf(cmd_info, hwdev); clear_wqe_complete_bit(cmdq, wqe, ci); break; - case HINIC3_CMD_TYPE_SET_ARM: - /* arm_bit was set until here */ - if (cmdq_arm_ceq_handler(cmdq, wqe, ci)) - return; + case HINIC3_CMD_TYPE_FAKE_TIMEOUT: + cmdq_clear_cmd_buf(cmd_info, hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); break; default: - /* only arm bit is using scmd wqe, the wqe is lcmd */ - wqe_lcmd = &wqe->wqe_lcmd; - ctrl = &wqe_lcmd->ctrl; - if (!WQE_COMPLETED(hinic3_hw_cpu32((ctrl)->ctrl_info))) + err = cmdq_type_default_ceq_handler(hwdev, cmd_info, cmdq, wqe, ci); + if (err != 0) return; - - dma_rmb(); - /* For FORCE_STOP cmd_type, we also need to wait for - * the firmware processing to complete to prevent the - * firmware from accessing the released cmd_buf - */ - if (cmd_info->cmd_type == HINIC3_CMD_TYPE_FORCE_STOP) { - cmdq_clear_cmd_buf(cmd_info, hwdev); - clear_wqe_complete_bit(cmdq, wqe, ci); - } else if (cmd_info->cmd_type == HINIC3_CMD_TYPE_ASYNC) { - cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci); - } else { - cmdq_sync_cmd_handler(cmdq, wqe, ci); - } - break; } } @@ -1197,6 +1301,7 @@ static int init_cmdq(struct hinic3_cmdq *cmdq, struct hinic3_hwdev *hwdev, cmdq->cmdq_type = q_type; cmdq->wrapped = 1; cmdq->hwdev = hwdev; + cmdq->cmdqs = hwdev->cmdqs;
spin_lock_init(&cmdq->cmdq_lock);
@@ -1224,14 +1329,20 @@ static void free_cmdq(struct hinic3_cmdq *cmdq) static int hinic3_set_cmdq_ctxts(struct hinic3_hwdev *hwdev) { struct hinic3_cmdqs *cmdqs = hwdev->cmdqs; + struct enhance_cmdq_ctxt_info *ctxt = NULL; u8 cmdq_type; int err;
cmdq_type = HINIC3_CMDQ_SYNC; for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { - err = hinic3_set_cmdq_ctxt(hwdev, cmdq_type, - &cmdqs->cmdq[cmdq_type].cmdq_ctxt); - if (err) + if (cmdqs->cmdq_mode == HINIC3_NORMAL_CMDQ) { + err = hinic3_set_cmdq_ctxt(hwdev, (u8)cmdq_type, + &cmdqs->cmdq[cmdq_type].cmdq_ctxt); + } else { + ctxt = &cmdqs->cmdq[cmdq_type].cmdq_enhance_ctxt; + err = hinic3_set_enhance_cmdq_ctxt(hwdev, (u8)cmdq_type, ctxt); + } + if (err != 0) return err; }
@@ -1271,8 +1382,8 @@ void hinic3_cmdq_flush_cmd(struct hinic3_hwdev *hwdev, spin_lock_bh(&cmdq->cmdq_lock);
while (cmdq_read_wqe(&cmdq->wq, &ci)) { - hinic3_wq_put_wqebbs(&cmdq->wq, NUM_WQEBBS_FOR_CMDQ_WQE); cmd_info = &cmdq->cmd_infos[ci]; + hinic3_wq_put_wqebbs(&cmdq->wq, cmd_info->wqebb_use_num);
if (cmd_info->cmd_type == HINIC3_CMD_TYPE_DIRECT_RESP || cmd_info->cmd_type == HINIC3_CMD_TYPE_SGE_RESP) @@ -1386,13 +1497,13 @@ int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev) static int create_cmdq_wq(struct hinic3_cmdqs *cmdqs) { u8 type, cmdq_type; - int err; + int err = 0;
cmdq_type = HINIC3_CMDQ_SYNC; for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { err = hinic3_wq_create(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type].wq, - HINIC3_CMDQ_DEPTH, CMDQ_WQEBB_SIZE); - if (err) { + HINIC3_CMDQ_DEPTH, cmdqs->wqebb_size); + if (err != 0) { sdk_err(cmdqs->hwdev->dev_hdl, "Failed to create cmdq wq\n"); goto destroy_wq; } @@ -1453,40 +1564,57 @@ static void destroy_cmdq_wq(struct hinic3_cmdqs *cmdqs) static int init_cmdqs(struct hinic3_hwdev *hwdev) { struct hinic3_cmdqs *cmdqs = NULL; - u8 cmdq_num; - int err = -ENOMEM; - - if (COMM_SUPPORT_CMDQ_NUM(hwdev)) { - cmdq_num = hwdev->glb_attr.cmdq_num; - if (hwdev->glb_attr.cmdq_num > HINIC3_MAX_CMDQ_TYPES) { - sdk_warn(hwdev->dev_hdl, "Adjust cmdq num to %d\n", HINIC3_MAX_CMDQ_TYPES); - cmdq_num = HINIC3_MAX_CMDQ_TYPES; - } - } else { - cmdq_num = HINIC3_MAX_CMDQ_TYPES; - }
cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); if (!cmdqs) - return err; + return -ENOMEM;
hwdev->cmdqs = cmdqs; cmdqs->hwdev = hwdev; - cmdqs->cmdq_num = cmdq_num; + if (HINIC3_HWIF_NUM_CEQS(hwdev->hwif) == 0 || hwdev->poll != 0) + cmdqs->poll = true; + + if (COMM_SUPPORT_ONLY_ENHANCE_CMDQ(hwdev) != 0) + cmdqs->cmdq_mode = HINIC3_ENHANCE_CMDQ; + else + cmdqs->cmdq_mode = HINIC3_NORMAL_CMDQ; + + hwdev->cmdq_mode = cmdqs->cmdq_mode;
- cmdqs->cmd_buf_pool = dma_pool_create("hinic3_cmdq", hwdev->dev_hdl, - HINIC3_CMDQ_BUF_SIZE, HINIC3_CMDQ_BUF_SIZE, 0ULL); + if (cmdqs->cmdq_mode == HINIC3_NORMAL_CMDQ) { + cmdqs->wqebb_size = CMDQ_WQEBB_SIZE; + cmdqs->wqebb_use_num = NUM_WQEBBS_FOR_CMDQ_WQE; + } else { + cmdqs->wqebb_size = ENHANCE_CMDQ_WQEBB_SIZE; + cmdqs->wqebb_use_num = NUM_WQEBBS_FOR_ENHANCE_CMDQ_WQE; + } + + cmdqs->cmdq_num = HINIC3_MAX_CMDQ_TYPES; + if (COMM_SUPPORT_CMDQ_NUM(hwdev) != 0) { + if (hwdev->glb_attr.cmdq_num <= HINIC3_MAX_CMDQ_TYPES) + cmdqs->cmdq_num = hwdev->glb_attr.cmdq_num; + else + sdk_warn(hwdev->dev_hdl, "Adjust cmdq num to %d\n", HINIC3_MAX_CMDQ_TYPES); + } + + cmdqs->cmd_buf_size = HINIC3_CMDQ_MAX_BUF_SIZE; + if (COMM_SUPPORT_CMD_BUF_SIZE(hwdev) != 0) { + if (hwdev->glb_attr.cmd_buf_size <= HINIC3_CMDQ_MAX_BUF_SIZE) + cmdqs->cmd_buf_size = hwdev->glb_attr.cmd_buf_size; + else + sdk_warn(hwdev->dev_hdl, "Adjust cmd buf size to %u\n", + HINIC3_CMDQ_MAX_BUF_SIZE); + } + + cmdqs->cmd_buf_pool = dma_pool_create("hinic3_cmdq", hwdev->dev_hdl, cmdqs->cmd_buf_size, + HINIC3_CMDQ_BUF_ALIGN, 0ULL); if (!cmdqs->cmd_buf_pool) { sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n"); - goto pool_create_err; + kfree(cmdqs); + return -ENOMEM; }
return 0; - -pool_create_err: - kfree(cmdqs); - - return err; }
int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev) @@ -1497,17 +1625,17 @@ int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev) int err = -ENOMEM;
err = init_cmdqs(hwdev); - if (err) + if (err != 0) return err;
cmdqs = hwdev->cmdqs;
err = create_cmdq_wq(cmdqs); - if (err) + if (err != 0) goto create_wq_err;
err = hinic3_alloc_db_addr(hwdev, &db_base, NULL); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to allocate doorbell address\n"); goto alloc_db_err; } @@ -1515,17 +1643,20 @@ int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev) cmdqs->cmdqs_db_base = (u8 *)db_base; for (cmdq_type = HINIC3_CMDQ_SYNC; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%d\n", cmdq_type); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%u\n", cmdq_type); goto init_cmdq_err; }
- cmdq_init_queue_ctxt(cmdqs, &cmdqs->cmdq[cmdq_type], - &cmdqs->cmdq[cmdq_type].cmdq_ctxt); + if (cmdqs->cmdq_mode == HINIC3_NORMAL_CMDQ) + cmdq_init_queue_ctxt(cmdqs, &cmdqs->cmdq[cmdq_type], + &cmdqs->cmdq[cmdq_type].cmdq_ctxt); + else /* HINIC3_ENHANCE_CMDQ */ + enhanced_cmdq_init_queue_ctxt(cmdqs, &cmdqs->cmdq[cmdq_type]); }
err = hinic3_set_cmdq_ctxts(hwdev); - if (err) + if (err != 0) goto init_cmdq_err;
return 0; diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h index b8b491a..b9b884f 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h @@ -9,6 +9,7 @@ #include <linux/spinlock.h>
#include "mpu_inband_cmd_defs.h" +#include "hinic3_cmdq_enhance.h" #include "hinic3_hw.h" #include "hinic3_wq.h" #include "hinic3_common.h" @@ -22,6 +23,14 @@ struct dma_pool { #define HINIC3_SCMD_DATA_LEN 16
#define HINIC3_CMDQ_DEPTH 4096 +#define HINIC3_CMDQ_MAX_BUF_SIZE 2048U +#define HINIC3_CMDQ_MIN_BUF_SIZE 4U +#define HINIC3_CMDQ_BUF_ALIGN 2048U + +enum hinic3_cmdq_mode { + HINIC3_NORMAL_CMDQ, + HINIC3_ENHANCE_CMDQ, +};
enum hinic3_cmdq_type { HINIC3_CMDQ_SYNC, @@ -39,6 +48,12 @@ enum hinic3_cmdq_db_type { HINIC3_DB_CMDQ_TYPE, };
+enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, + BUFDESC_SCMD_LEN = 3, + BUFDESC_ENHANCE_CMD_LEN = 3, /* 64B aligned */ +}; + /* hardware define: cmdq wqe */ struct hinic3_cmdq_header { u32 header_info; @@ -108,9 +123,18 @@ struct hinic3_cmdq_wqe { union { struct hinic3_cmdq_inline_wqe inline_wqe; struct hinic3_cmdq_wqe_lcmd wqe_lcmd; + struct hinic3_enhanced_cmdq_wqe enhanced_cmdq_wqe; }; };
+struct hinic3_cmdq_cmd_param { + u8 mod; + u8 cmd; + struct hinic3_cmd_buf *buf_in; + struct hinic3_cmd_buf *buf_out; + u64 *out_param; +}; + struct hinic3_cmdq_arm_bit { u32 q_type; u32 q_id; @@ -131,10 +155,22 @@ enum hinic3_cmdq_cmd_type { HINIC3_CMD_TYPE_FORCE_STOP, };
+enum data_format { + DATA_SGE, + DATA_DIRECT, +}; + +#define WQ_BLOCK_PFN_SHIFT 9 +#define CMDQ_PFN_SHIFT 12 + +#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) +#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) + struct hinic3_cmdq_cmd_info { enum hinic3_cmdq_cmd_type cmd_type; u16 channel; u16 rsvd1; + u16 wqebb_use_num;
struct completion *done; int *errcode; @@ -156,10 +192,12 @@ struct hinic3_cmdq { spinlock_t cmdq_lock;
struct cmdq_ctxt_info cmdq_ctxt; + struct enhance_cmdq_ctxt_info cmdq_enhance_ctxt;
struct hinic3_cmdq_cmd_info *cmd_infos;
struct hinic3_hwdev *hwdev; + struct hinic3_cmdqs *cmdqs; u64 rsvd1[2]; };
@@ -181,8 +219,13 @@ struct hinic3_cmdqs { bool lock_channel_en; unsigned long channel_stop; u8 cmdq_num; + u8 cmdq_mode; + u8 wqebb_size; + u8 wqebb_use_num; u32 rsvd1; u64 rsvd2; + u32 cmd_buf_size; + bool poll; /* use polling mode or int mode */ };
void hinic3_cmdq_ceq_handler(void *handle, u32 ceqe_data); @@ -205,5 +248,8 @@ void hinic3_cmdq_enable_channel_lock(struct hinic3_hwdev *hwdev, bool enable);
void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev);
+void enhanced_cmdq_set_wqe(struct hinic3_cmdq_wqe *wqe, enum hinic3_cmdq_cmd_type cmd_type, + const struct hinic3_cmdq_cmd_param *cmd_buf, int wrapped); +void enhanced_cmdq_init_queue_ctxt(struct hinic3_cmdqs *cmdqs, struct hinic3_cmdq *cmdq); #endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq_enhance.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq_enhance.c new file mode 100644 index 0000000..c212bcc --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq_enhance.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include "ossl_knl.h" +#include "npu_cmdq_base_defs.h" +#include "comm_defs.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_hwdev.h" +#include "hinic3_eqs.h" +#include "hinic3_common.h" +#include "hinic3_wq.h" +#include "hinic3_hw_comm.h" +#include "hinic3_cmdq.h" + +void enhanced_cmdq_init_queue_ctxt(struct hinic3_cmdqs *cmdqs, struct hinic3_cmdq *cmdq) +{ + struct enhance_cmdq_ctxt_info *ctxt_info = &cmdq->cmdq_enhance_ctxt; + struct hinic3_wq *wq = &cmdq->wq; + u64 cmdq_first_block_paddr, pfn; + u16 start_ci = (u16)wq->cons_idx; + u32 start_pi = (u16)wq->prod_idx; + + pfn = CMDQ_PFN(hinic3_wq_get_first_wqe_page_addr(wq)); + + /* first part 16B */ + if (cmdq->cmdqs->poll) { + ctxt_info->eq_cfg = + ENHANCED_CMDQ_SET(pfn, CTXT0_CI_WQE_ADDR) | + ENHANCED_CMDQ_SET(HINIC3_CEQ_ID_CMDQ, CTXT0_EQ) | + ENHANCED_CMDQ_SET(0, CTXT0_CEQ_ARM) | + ENHANCED_CMDQ_SET(0, CTXT0_CEQ_EN) | + ENHANCED_CMDQ_SET(1, CTXT0_HW_BUSY_BIT); + } else { + ctxt_info->eq_cfg = + ENHANCED_CMDQ_SET(pfn, CTXT0_CI_WQE_ADDR) | + ENHANCED_CMDQ_SET(HINIC3_CEQ_ID_CMDQ, CTXT0_EQ) | + ENHANCED_CMDQ_SET(1, CTXT0_CEQ_ARM) | + ENHANCED_CMDQ_SET(1, CTXT0_CEQ_EN) | + ENHANCED_CMDQ_SET(1, CTXT0_HW_BUSY_BIT); + } + + ctxt_info->dfx_pi_ci = + ENHANCED_CMDQ_SET(0, CTXT1_Q_DIS) | + ENHANCED_CMDQ_SET(0, 
CTXT1_ERR_CODE) | + ENHANCED_CMDQ_SET(start_pi, CTXT1_PI) | + ENHANCED_CMDQ_SET(start_ci, CTXT1_CI); + + /* second part 16B */ + ctxt_info->pft_thd = + ENHANCED_CMDQ_SET(CI_HIGN_IDX(start_ci), CTXT2_PFT_CI) | + ENHANCED_CMDQ_SET(1, CTXT2_O_BIT) | + ENHANCED_CMDQ_SET(WQ_PREFETCH_MIN, CTXT2_PFT_MIN) | + ENHANCED_CMDQ_SET(WQ_PREFETCH_MAX, CTXT2_PFT_MAX) | + ENHANCED_CMDQ_SET(WQ_PREFETCH_THRESHOLD, CTXT2_PFT_THD); + ctxt_info->pft_ci = + ENHANCED_CMDQ_SET(pfn, CTXT3_PFT_CI_ADDR) | + ENHANCED_CMDQ_SET(start_ci, CTXT3_PFT_CI); + + /* third part 16B */ + cmdq_first_block_paddr = cmdqs->wq_block_paddr; + pfn = WQ_BLOCK_PFN(cmdq_first_block_paddr); + + ctxt_info->ci_cla_addr = ENHANCED_CMDQ_SET(pfn, CTXT4_CI_CLA_ADDR); +} + +static void enhance_cmdq_set_completion(struct hinic3_cmdq_enhance_completion *completion, + const struct hinic3_cmd_buf *buf_out) +{ + completion->sge_resp_hi_addr = upper_32_bits(buf_out->dma_addr); + completion->sge_resp_lo_addr = lower_32_bits(buf_out->dma_addr); + completion->sge_resp_len = buf_out->size; +} + +static void cmdq_set_wqe_buf_desc( + struct hinic3_enhanced_cmdq_wqe *enhanced_wqe, + const struct hinic3_cmdq_cmd_param *cmd_buf, u32 len) +{ + enhanced_wqe->buf_desc[0].sge_send_hi_addr = upper_32_bits(cmd_buf->buf_in->dma_addr + len); + enhanced_wqe->buf_desc[0].sge_send_lo_addr = lower_32_bits(cmd_buf->buf_in->dma_addr + len); + enhanced_wqe->buf_desc[0].len = len; + + enhanced_wqe->buf_desc[1].sge_send_hi_addr = + upper_32_bits(cmd_buf->buf_in->dma_addr + (len << 1)); + enhanced_wqe->buf_desc[1].sge_send_lo_addr = + lower_32_bits(cmd_buf->buf_in->dma_addr + (len << 1)); + enhanced_wqe->buf_desc[1].len = cmd_buf->buf_in->size - (len << 1); +} + +void enhanced_cmdq_set_wqe(struct hinic3_cmdq_wqe *wqe, enum hinic3_cmdq_cmd_type cmd_type, + const struct hinic3_cmdq_cmd_param *cmd_buf, int wrapped) +{ + struct hinic3_enhanced_cmdq_wqe *enhanced_wqe = NULL; + u32 len = 0; + + if (!wqe || !cmd_buf || !cmd_buf->buf_in) { + pr_err("wqe or buf_in 
is null\n"); + return; + } + + enhanced_wqe = &wqe->enhanced_cmdq_wqe; + len = cmd_buf->buf_in->size / 3; /* Wqe should be 64B aligned, so we fill 3 sges */ + + enhanced_wqe->ctrl_sec.header = ENHANCE_CMDQ_WQE_HEADER_SET(len, SEND_SGE_LEN) | + ENHANCE_CMDQ_WQE_HEADER_SET(BUFDESC_ENHANCE_CMD_LEN, BDSL) | + ENHANCE_CMDQ_WQE_HEADER_SET(DATA_SGE, DF) | + ENHANCE_CMDQ_WQE_HEADER_SET(NORMAL_WQE_TYPE, DN) | + ENHANCE_CMDQ_WQE_HEADER_SET(COMPACT_WQE_TYPE, EC) | + ENHANCE_CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + + enhanced_wqe->ctrl_sec.sge_send_hi_addr = upper_32_bits(cmd_buf->buf_in->dma_addr); + enhanced_wqe->ctrl_sec.sge_send_lo_addr = lower_32_bits(cmd_buf->buf_in->dma_addr); + + cmdq_set_wqe_buf_desc(enhanced_wqe, cmd_buf, len); + + enhanced_wqe->completion.cs_format = ENHANCE_CMDQ_WQE_CS_SET(cmd_buf->cmd, CMD) | + ENHANCE_CMDQ_WQE_CS_SET(HINIC3_ACK_TYPE_CMDQ, ACK_TYPE) | + ENHANCE_CMDQ_WQE_CS_SET((cmd_buf->mod == HINIC3_MOD_ROCE), RN) | + ENHANCE_CMDQ_WQE_CS_SET(cmd_buf->mod, MOD); + + switch (cmd_type) { + case HINIC3_CMD_TYPE_DIRECT_RESP: + enhanced_wqe->completion.cs_format |= ENHANCE_CMDQ_WQE_CS_SET(INLINE_DATA, CF); + break; + case HINIC3_CMD_TYPE_SGE_RESP: + if (cmd_buf->buf_out) { + enhanced_wqe->completion.cs_format |= + ENHANCE_CMDQ_WQE_CS_SET(SGE_RESPONSE, CF); + enhance_cmdq_set_completion(&enhanced_wqe->completion, cmd_buf->buf_out); + } + break; + case HINIC3_CMD_TYPE_ASYNC: + break; + default: + break; + } +} + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq_enhance.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq_enhance.h new file mode 100644 index 0000000..5a651c4 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq_enhance.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_ENHANCED_CMDQ_H +#define HINIC3_ENHANCED_CMDQ_H + +#include "hinic3_hw.h" + +enum complete_format { + INLINE_DATA = 0, + SGE_RESPONSE = 1, +}; + +#define 
HINIC3_CMDQ_MAX_INLINE_DATA_SIZE 160U +#define HINIC3_CMDQ_WQE_INLINE_DATA_PI_OFFSET 2 + +/* first part 16B */ +#define ENHANCED_CMDQ_CTXT0_CI_WQE_ADDR_SHIFT 0 +#define ENHANCED_CMDQ_CTXT0_RSV1_SHIFT 52 +#define ENHANCED_CMDQ_CTXT0_EQ_SHIFT 53 +#define ENHANCED_CMDQ_CTXT0_CEQ_ARM_SHIFT 61 +#define ENHANCED_CMDQ_CTXT0_CEQ_EN_SHIFT 62 +#define ENHANCED_CMDQ_CTXT0_HW_BUSY_BIT_SHIFT 63 + +#define ENHANCED_CMDQ_CTXT0_CI_WQE_ADDR_MASK 0xFFFFFFFFFFFFFU +#define ENHANCED_CMDQ_CTXT0_RSV1_MASK 0x1U +#define ENHANCED_CMDQ_CTXT0_EQ_MASK 0xFFU +#define ENHANCED_CMDQ_CTXT0_CEQ_ARM_MASK 0x1U +#define ENHANCED_CMDQ_CTXT0_CEQ_EN_MASK 0x1U +#define ENHANCED_CMDQ_CTXT0_HW_BUSY_BIT_MASK 0x1U + +#define ENHANCED_CMDQ_CTXT1_Q_DIS_SHIFT 0 +#define ENHANCED_CMDQ_CTXT1_ERR_CODE_SHIFT 1 +#define ENHANCED_CMDQ_CTXT1_RSV1_SHIFT 3 +#define ENHANCED_CMDQ_CTXT1_PI_SHIFT 32 +#define ENHANCED_CMDQ_CTXT1_CI_SHIFT 48 + +#define ENHANCED_CMDQ_CTXT1_Q_DIS_MASK 0x1U +#define ENHANCED_CMDQ_CTXT1_ERR_CODE_MASK 0x3U +#define ENHANCED_CMDQ_CTXT1_RSV1_MASK 0x1FFFFFFFU +#define ENHANCED_CMDQ_CTXT1_PI_MASK 0xFFFFU +#define ENHANCED_CMDQ_CTXT1_CI_MASK 0xFFFFU + +/* second PART 16B */ +#define ENHANCED_CMDQ_CTXT2_PFT_CI_SHIFT 0 +#define ENHANCED_CMDQ_CTXT2_O_BIT_SHIFT 4 +#define ENHANCED_CMDQ_CTXT2_PFT_THD_SHIFT 32 +#define ENHANCED_CMDQ_CTXT2_PFT_MAX_SHIFT 46 +#define ENHANCED_CMDQ_CTXT2_PFT_MIN_SHIFT 57 + +#define ENHANCED_CMDQ_CTXT2_PFT_CI_MASK 0xFU +#define ENHANCED_CMDQ_CTXT2_O_BIT_MASK 0x1U +#define ENHANCED_CMDQ_CTXT2_PFT_THD_MASK 0x3FFFFU +#define ENHANCED_CMDQ_CTXT2_PFT_MAX_MASK 0x7FFFU +#define ENHANCED_CMDQ_CTXT2_PFT_MIN_MASK 0x7FU + +#define ENHANCED_CMDQ_CTXT3_PFT_CI_ADDR_SHIFT 0 +#define ENHANCED_CMDQ_CTXT3_PFT_CI_SHIFT 52 + +#define ENHANCED_CMDQ_CTXT3_PFT_CI_ADDR_MASK 0xFFFFFFFFFFFFFU +#define ENHANCED_CMDQ_CTXT3_PFT_CI_MASK 0xFFFFU + +/* THIRD PART 16B */ +#define ENHANCED_CMDQ_CTXT4_CI_CLA_ADDR_SHIFT 0 + +#define ENHANCED_CMDQ_CTXT4_CI_CLA_ADDR_MASK 0x7FFFFFFFFFFFFFU + +#define 
ENHANCED_CMDQ_SET(val, member) \ + (((u64)(val) & ENHANCED_CMDQ_##member##_MASK) << \ + ENHANCED_CMDQ_##member##_SHIFT) + +#define WQ_PREFETCH_MAX 4 +#define WQ_PREFETCH_MIN 1 +#define WQ_PREFETCH_THRESHOLD 256 + +#define CI_IDX_HIGH_SHIFH 12 +#define CI_HIGN_IDX(val) ((val) >> CI_IDX_HIGH_SHIFH) + +#define ENHANCE_CMDQ_WQE_HEADER_SEND_SGE_LEN_SHIFT 0 +#define ENHANCE_CMDQ_WQE_HEADER_BDSL_SHIFT 19 +#define ENHANCE_CMDQ_WQE_HEADER_DF_SHIFT 28 +#define ENHANCE_CMDQ_WQE_HEADER_DN_SHIFT 29 +#define ENHANCE_CMDQ_WQE_HEADER_EC_SHIFT 30 +#define ENHANCE_CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 + +#define ENHANCE_CMDQ_WQE_HEADER_SEND_SGE_LEN_MASK 0x3FFFFU +#define ENHANCE_CMDQ_WQE_HEADER_BDSL_MASK 0xFFU +#define ENHANCE_CMDQ_WQE_HEADER_DF_MASK 0x1U +#define ENHANCE_CMDQ_WQE_HEADER_DN_MASK 0x1U +#define ENHANCE_CMDQ_WQE_HEADER_EC_MASK 0x1U +#define ENHANCE_CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U + +#define ENHANCE_CMDQ_WQE_HEADER_SET(val, member) \ + ((((u32)(val)) & ENHANCE_CMDQ_WQE_HEADER_##member##_MASK) << \ + ENHANCE_CMDQ_WQE_HEADER_##member##_SHIFT) + +#define ENHANCE_CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> ENHANCE_CMDQ_WQE_HEADER_##member##_SHIFT) & \ + ENHANCE_CMDQ_WQE_HEADER_##member##_MASK) + +#define ENHANCE_CMDQ_WQE_CS_ERR_CODE_SHIFT 0 +#define ENHANCE_CMDQ_WQE_CS_CMD_SHIFT 4 +#define ENHANCE_CMDQ_WQE_CS_ACK_TYPE_SHIFT 12 +#define ENHANCE_CMDQ_WQE_CS_HW_BUSY_SHIFT 14 +#define ENHANCE_CMDQ_WQE_CS_RN_SHIFT 15 +#define ENHANCE_CMDQ_WQE_CS_MOD_SHIFT 16 +#define ENHANCE_CMDQ_WQE_CS_CF_SHIFT 31 + +#define ENHANCE_CMDQ_WQE_CS_ERR_CODE_MASK 0xFU +#define ENHANCE_CMDQ_WQE_CS_CMD_MASK 0xFFU +#define ENHANCE_CMDQ_WQE_CS_ACK_TYPE_MASK 0x3U +#define ENHANCE_CMDQ_WQE_CS_HW_BUSY_MASK 0x1U +#define ENHANCE_CMDQ_WQE_CS_RN_MASK 0x1U +#define ENHANCE_CMDQ_WQE_CS_MOD_MASK 0x1FU +#define ENHANCE_CMDQ_WQE_CS_CF_MASK 0x1U + +#define ENHANCE_CMDQ_WQE_CS_SET(val, member) \ + ((((u32)(val)) & ENHANCE_CMDQ_WQE_CS_##member##_MASK) << \ + ENHANCE_CMDQ_WQE_CS_##member##_SHIFT) + +#define 
ENHANCE_CMDQ_WQE_CS_GET(val, member) \ + (((val) >> ENHANCE_CMDQ_WQE_CS_##member##_SHIFT) & \ + ENHANCE_CMDQ_WQE_CS_##member##_MASK) + +struct hinic3_cmdq_enhance_completion { + u32 cs_format; + u32 sge_resp_hi_addr; + u32 sge_resp_lo_addr; + u32 sge_resp_len; /* bit 14~31 rsvd, soft can't use. */ +}; + +struct hinic3_cmdq_enhance_response { + u32 cs_format; + u32 resvd; + u64 direct_data; +}; + +struct sge_send_info { + u32 sge_hi_addr; + u32 sge_li_addr; + u32 seg_len; + u32 rsvd; +}; + +#define NORMAL_WQE_TYPE 0 +#define COMPACT_WQE_TYPE 1 +struct hinic3_ctrl_section { + u32 header; + u32 rsv; + u32 sge_send_hi_addr; + u32 sge_send_lo_addr; +}; + +struct hinic3_enhanced_cmd_bufdesc { + u32 len; + u32 rsv; + u32 sge_send_hi_addr; + u32 sge_send_lo_addr; +}; + +struct hinic3_enhanced_cmdq_wqe { + struct hinic3_ctrl_section ctrl_sec; /* 16B */ + struct hinic3_cmdq_enhance_completion completion; /* 16B */ + union { + struct hinic3_enhanced_cmd_bufdesc buf_desc[2]; /* 32B */ + u8 inline_data[HINIC3_CMDQ_MAX_INLINE_DATA_SIZE]; /* 160B max */ + }; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c index a4c0235..937d699 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c @@ -593,6 +593,24 @@ static void reschedule_eq_handler(struct hinic3_eq *eq) } }
+int hinic3_reschedule_eq(struct hinic3_hwdev *hwdev, enum hinic3_eq_type type, + u16 eq_id) +{ + if (type == HINIC3_AEQ) { + if (eq_id >= hwdev->aeqs->num_aeqs) + return -EINVAL; + + reschedule_eq_handler(&(hwdev->aeqs->aeq[eq_id])); + } else { + if (eq_id >= hwdev->ceqs->num_ceqs) + return -EINVAL; + + reschedule_eq_handler(&(hwdev->ceqs->ceq[eq_id])); + } + + return 0; +} + /** * eq_irq_handler - handler for the eq event * @data: the event queue of the event diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.h index a6b83c3..2a6eda3 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.h @@ -161,4 +161,6 @@ void hinic3_dump_ceq_info(struct hinic3_hwdev *hwdev);
void hinic3_dump_aeq_info(struct hinic3_hwdev *hwdev);
+int hinic3_reschedule_eq(struct hinic3_hwdev *hwdev, enum hinic3_eq_type type, + u16 eq_id); #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c index cea7581..819c56f 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c @@ -650,8 +650,7 @@ static int cfg_init_eq(struct hinic3_hwdev *dev) cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain);
if (!num_ceq) { - sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n"); - return -EFAULT; + return 0; }
eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL); diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c index d8a1a28..8a87d25 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c @@ -267,13 +267,16 @@ int hinic3_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel) } EXPORT_SYMBOL(hinic3_func_reset);
-static u16 get_hw_rx_buf_size(int rx_buf_sz) +static u16 get_hw_rx_buf_size(const void *hwdev, int rx_buf_sz) { u16 num_hw_types = sizeof(hinic3_hw_rx_buf_size) / sizeof(hinic3_hw_rx_buf_size[0]); u16 i;
+ if (COMM_IS_USE_REAL_RX_BUF_SIZE((struct hinic3_hwdev *)hwdev)) + return rx_buf_sz; + for (i = 0; i < num_hw_types; i++) { if (hinic3_hw_rx_buf_size[i] == rx_buf_sz) return i; @@ -303,7 +306,7 @@ int hinic3_set_root_ctxt(void *hwdev, u32 rq_depth, u32 sq_depth, int rx_buf_sz, root_ctxt.lro_en = 1;
root_ctxt.rq_depth = (u16)ilog2(rq_depth); - root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz); + root_ctxt.rx_buf_sz = get_hw_rx_buf_size(hwdev, rx_buf_sz); root_ctxt.sq_depth = (u16)ilog2(sq_depth);
err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, @@ -357,6 +360,10 @@ int hinic3_set_cmdq_depth(void *hwdev, u16 cmdq_depth)
root_ctxt.set_cmdq_depth = 1; root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth); + root_ctxt.cmdq_mode = ((struct hinic3_hwdev *)hwdev)->cmdq_mode; + + if (((struct hinic3_hwdev *)hwdev)->cmdq_mode == HINIC3_ENHANCE_CMDQ) + root_ctxt.cmdq_depth--;
err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_VAT, &root_ctxt, sizeof(root_ctxt), &root_ctxt, &out_size); @@ -370,6 +377,30 @@ int hinic3_set_cmdq_depth(void *hwdev, u16 cmdq_depth) return 0; }
+int hinic3_set_enhance_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id, + struct enhance_cmdq_ctxt_info *ctxt) +{ + struct comm_cmd_enhance_cmdq_ctxt cmdq_ctxt; + u16 out_size = sizeof(cmdq_ctxt); + int err; + + memset(&cmdq_ctxt, 0, sizeof(cmdq_ctxt)); + memcpy(&cmdq_ctxt.ctxt, ctxt, sizeof(*ctxt)); + cmdq_ctxt.func_id = hinic3_global_func_id(hwdev); + cmdq_ctxt.cmdq_id = cmdq_id; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_ENHANCE_CMDQ_CTXT, + &cmdq_ctxt, sizeof(cmdq_ctxt), + &cmdq_ctxt, &out_size); + if ((err != 0) || (out_size == 0) || (cmdq_ctxt.head.status != 0)) { + sdk_err(hwdev->dev_hdl, "Failed to set enhanced cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cmdq_ctxt.head.status, out_size); + return -EFAULT; + } + + return 0; +} + int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id, struct cmdq_ctxt_info *ctxt) { diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h index e031ec4..8913bfc 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h @@ -29,6 +29,9 @@ enum ppf_tmr_status {
int hinic3_set_cmdq_depth(void *hwdev, u16 cmdq_depth);
+int hinic3_set_enhance_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id, + struct enhance_cmdq_ctxt_info *ctxt); + int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id, struct cmdq_ctxt_info *ctxt);
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h index e739767..b8a3356 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h @@ -180,11 +180,13 @@ struct hinic3_hwdev { u16 aeq_busy_cnt;
u64 rsvd4[8]; + u8 cmdq_mode; };
#define HINIC3_DRV_FEATURE_QW0 \ (COMM_F_API_CHAIN | COMM_F_CLP | COMM_F_MBOX_SEGMENT | \ - COMM_F_CMDQ_NUM | COMM_F_VIRTIO_VQ_SIZE) + COMM_F_CMDQ_NUM | COMM_F_VIRTIO_VQ_SIZE | COMM_F_USE_REAL_RX_BUF_SIZE | \ + COMM_F_CMD_BUF_SIZE | COMM_F_ONLY_ENHANCE_CMDQ)
#define HINIC3_MAX_HOST_NUM(hwdev) ((hwdev)->glb_attr.max_host_num) #define HINIC3_MAX_PF_NUM(hwdev) ((hwdev)->glb_attr.max_pf_num) @@ -197,7 +199,10 @@ struct hinic3_hwdev { #define COMM_SUPPORT_CHANNEL_DETECT(hwdev) COMM_FEATURE_QW0(hwdev, CHANNEL_DETECT) #define COMM_SUPPORT_MBOX_SEGMENT(hwdev) (hinic3_pcie_itf_id(hwdev) == SPU_HOST_ID) #define COMM_SUPPORT_CMDQ_NUM(hwdev) COMM_FEATURE_QW0(hwdev, CMDQ_NUM) +#define COMM_SUPPORT_CMD_BUF_SIZE(hwdev) COMM_FEATURE_QW0(hwdev, CMD_BUF_SIZE) #define COMM_SUPPORT_VIRTIO_VQ_SIZE(hwdev) COMM_FEATURE_QW0(hwdev, VIRTIO_VQ_SIZE) +#define COMM_IS_USE_REAL_RX_BUF_SIZE(hwdev) COMM_FEATURE_QW0(hwdev, USE_REAL_RX_BUF_SIZE) +#define COMM_SUPPORT_ONLY_ENHANCE_CMDQ(hwdev) COMM_FEATURE_QW0(hwdev, ONLY_ENHANCE_CMDQ)
void set_func_host_mode(struct hinic3_hwdev *hwdev, enum hinic3_func_mode mode);
diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h index 89d5cc4..291e475 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h @@ -1,85 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2024 Huawei Technologies Co., Ltd */
-#ifndef COMM_DEFS_H -#define COMM_DEFS_H +#ifndef MPU_CMD_BASE_DEFS_H +#define MPU_CMD_BASE_DEFS_H
#include "mgmt_msg_base.h" - -/** MPU CMD MODULE TYPE */ -enum hinic3_mod_type { - HINIC3_MOD_COMM = 0, /* HW communication module */ - HINIC3_MOD_L2NIC = 1, /* L2NIC module */ - HINIC3_MOD_ROCE = 2, - HINIC3_MOD_PLOG = 3, - HINIC3_MOD_TOE = 4, - HINIC3_MOD_FLR = 5, - HINIC3_MOD_VROCE = 6, - HINIC3_MOD_CFGM = 7, /* Configuration management */ - HINIC3_MOD_CQM = 8, - HINIC3_MOD_VMSEC = 9, - COMM_MOD_FC = 10, - HINIC3_MOD_OVS = 11, - HINIC3_MOD_DSW = 12, - HINIC3_MOD_MIGRATE = 13, - HINIC3_MOD_HILINK = 14, - HINIC3_MOD_CRYPT = 15, /* secure crypto module */ - HINIC3_MOD_VIO = 16, - HINIC3_MOD_IMU = 17, - HINIC3_MOD_DFX = 18, /* DFX */ - HINIC3_MOD_HW_MAX = 19, /* hardware max module id */ - /* Software module id, for PF/VF and multi-host */ - HINIC3_MOD_SW_FUNC = 20, - HINIC3_MOD_MAX, -}; - -/* Func reset flag, Specifies the resource to be cleaned.*/ -enum func_reset_flag_e { - RES_TYPE_FLUSH_BIT = 0, - RES_TYPE_MQM, - RES_TYPE_SMF, - RES_TYPE_PF_BW_CFG, - - RES_TYPE_COMM = 10, - RES_TYPE_COMM_MGMT_CH, /* clear mbox and aeq, The RES_TYPE_COMM bit must be set */ - RES_TYPE_COMM_CMD_CH, /* clear cmdq and ceq, The RES_TYPE_COMM bit must be set */ - RES_TYPE_NIC, - RES_TYPE_OVS, - RES_TYPE_VBS, - RES_TYPE_ROCE, - RES_TYPE_FC, - RES_TYPE_TOE, - RES_TYPE_IPSEC, - RES_TYPE_MAX, -}; - -#define HINIC3_COMM_RES \ - ((1 << RES_TYPE_COMM) | (1 << RES_TYPE_COMM_CMD_CH) | \ - (1 << RES_TYPE_FLUSH_BIT) | (1 << RES_TYPE_MQM) | \ - (1 << RES_TYPE_SMF) | (1 << RES_TYPE_PF_BW_CFG)) - -#define HINIC3_NIC_RES BIT(RES_TYPE_NIC) -#define HINIC3_OVS_RES BIT(RES_TYPE_OVS) -#define HINIC3_VBS_RES BIT(RES_TYPE_VBS) -#define HINIC3_ROCE_RES BIT(RES_TYPE_ROCE) -#define HINIC3_FC_RES BIT(RES_TYPE_FC) -#define HINIC3_TOE_RES BIT(RES_TYPE_TOE) -#define HINIC3_IPSEC_RES BIT(RES_TYPE_IPSEC) - -/* MODE OVS���NIC���UNKNOWN */ -#define HINIC3_WORK_MODE_OVS 0 -#define HINIC3_WORK_MODE_UNKNOWN 1 -#define HINIC3_WORK_MODE_NIC 2 - -#define DEVICE_TYPE_L2NIC 0 -#define DEVICE_TYPE_NVME 1 -#define 
DEVICE_TYPE_VIRTIO_NET 2 -#define DEVICE_TYPE_VIRTIO_BLK 3 -#define DEVICE_TYPE_VIRTIO_VSOCK 4 -#define DEVICE_TYPE_VIRTIO_NET_TRANSITION 5 -#define DEVICE_TYPE_VIRTIO_BLK_TRANSITION 6 -#define DEVICE_TYPE_VIRTIO_SCSI_TRANSITION 7 -#define DEVICE_TYPE_VIRTIO_HPC 8 +#include "comm_defs.h"
enum hinic3_svc_type { SVC_T_COMM = 0, @@ -97,20 +23,4 @@ enum hinic3_svc_type { SVC_T_MAX, };
-/** - * Common header control information of the COMM message interaction command word - * between the driver and PF. - */ -struct comm_info_head { - /** response status code, 0: success, others: error code */ - u8 status; - - /** firmware version for command */ - u8 version; - - /** response aeq number, unused for now */ - u8 rep_aeq_num; - u8 rsvd[5]; -}; - #endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h index b24e729..a8e2d63 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h @@ -27,6 +27,8 @@ enum hinic3_mgmt_cmd { COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, /* config msix ctrl register @see comm_cmd_msix_config */ COMM_MGMT_CMD_SET_CEQ_CTRL_REG, /**< set ceq ctrl register @see comm_cmd_ceq_ctrl_reg */ COMM_MGMT_CMD_SET_DMA_ATTR, /**< set PF/VF DMA table attr @see comm_cmd_dma_attr_config */ + COMM_MGMT_CMD_SET_ENHANCE_CMDQ_CTXT, + COMM_MGMT_CMD_SET_PPF_TBL_HTR_FLG, /* set PPF func table os hotreplace flag * @see comm_cmd_ppf_tbl_htrp_config */ diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h index f535777..674603d 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h @@ -28,6 +28,9 @@ enum { COMM_F_MBOX_SEGMENT = 1U << 3, COMM_F_CMDQ_NUM = 1U << 4, COMM_F_VIRTIO_VQ_SIZE = 1U << 5, + COMM_F_ONLY_ENHANCE_CMDQ = 1U << 8, + COMM_F_USE_REAL_RX_BUF_SIZE = 1U << 9, + COMM_F_CMD_BUF_SIZE = 1U << 10, };
#define COMM_MAX_FEATURE_QWORD 4 @@ -74,7 +77,8 @@ struct comm_global_attr {
u8 mgmt_host_node_id; /**< node id */ u8 cmdq_num; /**< cmdq num */ - u8 rsvd1[2]; + u16 cmd_buf_size; /**< command buffer size */ + u32 rsvd2[8]; };
@@ -126,6 +130,27 @@ struct comm_cmd_cmdq_ctxt { struct cmdq_ctxt_info ctxt; };
+struct enhance_cmdq_ctxt_info { + u64 eq_cfg; + u64 dfx_pi_ci; + + u64 pft_thd; + u64 pft_ci; + + u64 rsv; + u64 ci_cla_addr; +}; + +struct comm_cmd_enhance_cmdq_ctxt { + struct mgmt_msg_head head; + + u16 func_id; + u8 cmdq_id; + u8 rsvd1[5]; + + struct enhance_cmdq_ctxt_info ctxt; +}; + struct comm_cmd_root_ctxt { struct mgmt_msg_head head;
@@ -134,7 +159,7 @@ struct comm_cmd_root_ctxt { u8 cmdq_depth; u16 rx_buf_sz; u8 lro_en; - u8 rsvd1; + u8 cmdq_mode; u16 sq_depth; u16 rq_depth; u64 rsvd2; diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h index fe663e1..0bd168c 100644 --- a/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h @@ -43,7 +43,7 @@ enum nic_rss_hash_type { #define NIC_DCB_TC_MAX 0x8 #define NIC_DCB_PG_MAX 0x8 #define NIC_DCB_TSA_SP 0x0 -#define NIC_DCB_TSA_CBS 0x1 /* hi1822 do NOT support */ +#define NIC_DCB_TSA_CBS 0x1 #define NIC_DCB_TSA_ETS 0x2 #define NIC_DCB_DSCP_NUM 0x8 #define NIC_DCB_IP_PRI_MAX 0x40 diff --git a/drivers/net/ethernet/huawei/hinic3/nic_cfg_comm.h b/drivers/net/ethernet/huawei/hinic3/nic_cfg_comm.h index 9fb4232..abcfa27 100644 --- a/drivers/net/ethernet/huawei/hinic3/nic_cfg_comm.h +++ b/drivers/net/ethernet/huawei/hinic3/nic_cfg_comm.h @@ -12,6 +12,8 @@ #ifndef NIC_CFG_COMM_H #define NIC_CFG_COMM_H
+#include <linux/types.h> + /* rss */ #define HINIC3_RSS_TYPE_VALID_SHIFT 23 #define HINIC3_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 @@ -51,7 +53,7 @@ enum nic_rss_hash_type { #define NIC_DCB_TC_MAX 0x8 #define NIC_DCB_PG_MAX 0x8 #define NIC_DCB_TSA_SP 0x0 -#define NIC_DCB_TSA_CBS 0x1 /* hi1822 do NOT support */ +#define NIC_DCB_TSA_CBS 0x1 #define NIC_DCB_TSA_ETS 0x2 #define NIC_DCB_DSCP_NUM 0x8 #define NIC_DCB_IP_PRI_MAX 0x40