From: Wenpeng Liang <liangwenpeng@huawei.com>
mainline inclusion
from mainline-for-next
commit 40b4b79c866f
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5TAQ5
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git/commit/?id=40b...
----------------------------------------------------------------------
There is no need to use a dedicated DFX file and a DFX ops structure to
manage the query interface of the queue context. Instead, add a
query_cqc() op to struct hns_roce_hw, implement it in hns_roce_hw_v2.c,
and let the restrack code call it through the common hw ops.
Link: https://lore.kernel.org/r/20220822104455.2311053-2-liangwenpeng@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Zhengfeng Luo <luozhengfeng@h-partners.com>
Reviewed-by: Yangyang Li <liyangyang20@huawei.com>
Reviewed-by: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/infiniband/hw/hns/Makefile            |  2 +-
 drivers/infiniband/hw/hns/hns_roce_device.h   | 10 ++----
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c    | 35 ++++++++++++++++---
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h    |  3 --
 drivers/infiniband/hw/hns/hns_roce_main.c     |  6 +++-
 drivers/infiniband/hw/hns/hns_roce_restrack.c | 35 +++++++------------
 6 files changed, 50 insertions(+), 41 deletions(-)

diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 9f04f25d9631..a7d259238305 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -10,6 +10,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
 	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
 
 ifdef CONFIG_INFINIBAND_HNS_HIP08
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
 obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
 endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 2d29221055be..848f0e0e76a4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -849,11 +849,6 @@ struct hns_roce_caps {
 	enum cong_type cong_type;
 };
 
-struct hns_roce_dfx_hw {
-	int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
-			      int *buffer);
-};
-
 enum hns_roce_device_state {
 	HNS_ROCE_DEVICE_STATE_INITED,
 	HNS_ROCE_DEVICE_STATE_RST_DOWN,
@@ -899,6 +894,7 @@ struct hns_roce_hw {
 	int (*init_eq)(struct hns_roce_dev *hr_dev);
 	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
 	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
+	int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
 	const struct ib_device_ops *hns_roce_dev_ops;
 	const struct ib_device_ops *hns_roce_dev_srq_ops;
 };
@@ -960,7 +956,6 @@ struct hns_roce_dev {
 	void *priv;
 	struct workqueue_struct *irq_workq;
 	struct work_struct ecc_work;
-	const struct hns_roce_dfx_hw *dfx;
 	u32 func_num;
 	u32 is_vf;
 	u32 cong_algo_tmpl_id;
@@ -1223,8 +1218,7 @@ u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
 void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
-int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
-			       struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
 struct hns_user_mmap_entry *
 hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
 				size_t length,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index de7873551ffc..6b772536ab3c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5801,6 +5801,35 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	return ret;
 }
 
+static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
+				 void *buffer)
+{
+	struct hns_roce_v2_cq_context *context;
+	struct hns_roce_cmd_mailbox *mailbox;
+	int ret;
+
+	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	context = mailbox->buf;
+	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
+				HNS_ROCE_CMD_QUERY_CQC, cqn);
+	if (ret) {
+		ibdev_err(&hr_dev->ib_dev,
+			  "failed to process cmd when querying CQ, ret = %d.\n",
+			  ret);
+		goto err_mailbox;
+	}
+
+	memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+	return ret;
+}
+
 static void hns_roce_irq_work_handle(struct work_struct *work)
 {
 	struct hns_roce_work *irq_work =
@@ -6593,10 +6622,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
 	kfree(eq_table->eq);
 }
 
-static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
-	.query_cqc_info = hns_roce_v2_query_cqc_info,
-};
-
 static const struct ib_device_ops hns_roce_v2_dev_ops = {
 	.destroy_qp = hns_roce_v2_destroy_qp,
 	.modify_cq = hns_roce_v2_modify_cq,
@@ -6637,6 +6662,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
 	.init_eq = hns_roce_v2_init_eq_table,
 	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
 	.write_srqc = hns_roce_v2_write_srqc,
+	.query_cqc = hns_roce_v2_query_cqc,
 	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
 	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
 };
@@ -6668,7 +6694,6 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
 	hr_dev->is_vf = id->driver_data;
 	hr_dev->dev = &handle->pdev->dev;
 	hr_dev->hw = &hns_roce_hw_v2;
-	hr_dev->dfx = &hns_roce_dfx_hw_v2;
 	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
 	hr_dev->odb_offset = hr_dev->sdb_offset;
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 8aa56c24e677..6bf878fe661e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1455,9 +1455,6 @@ struct hns_roce_sccc_clr_done {
 	__le32 rsv[5];
 };
 
-int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
-			       int *buffer);
-
 int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
 
 int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 11f42cebfa40..9d4dbc3205c6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -514,7 +514,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 	.destroy_ah = hns_roce_destroy_ah,
 	.destroy_cq = hns_roce_destroy_cq,
 	.disassociate_ucontext = hns_roce_disassociate_ucontext,
-	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
 	.get_dma_mr = hns_roce_get_dma_mr,
 	.get_link_layer = hns_roce_get_link_layer,
 	.get_port_immutable = hns_roce_port_immutable,
@@ -564,6 +563,10 @@ static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
 };
 
+static const struct ib_device_ops hns_roce_dev_restrack_ops = {
+	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
+};
+
 static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 {
 	int ret;
@@ -642,6 +645,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 
 	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
 	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+	ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
 	for (i = 0; i < hr_dev->caps.num_ports; i++) {
 		if (!hr_dev->iboe.netdevs[i])
 			continue;
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 24a154d64630..83417be15d3f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -55,45 +55,34 @@ static int hns_roce_fill_cq(struct sk_buff *msg,
 	return -EMSGSIZE;
 }
 
-int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
-			       struct ib_cq *ib_cq)
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
-	struct hns_roce_v2_cq_context *context;
+	struct hns_roce_v2_cq_context context;
 	struct nlattr *table_attr;
 	int ret;
 
-	if (!hr_dev->dfx->query_cqc_info)
+	if (!hr_dev->hw->query_cqc)
 		return -EINVAL;
 
-	context = kzalloc(sizeof(struct hns_roce_v2_cq_context), GFP_KERNEL);
-	if (!context)
-		return -ENOMEM;
-
-	ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);
+	ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
 	if (ret)
-		goto err;
+		return -EINVAL;
 
 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
-	if (!table_attr) {
-		ret = -EMSGSIZE;
-		goto err;
-	}
+	if (!table_attr)
+		return -EMSGSIZE;
 
-	if (hns_roce_fill_cq(msg, context)) {
-		ret = -EMSGSIZE;
-		goto err_cancel_table;
-	}
+	if (hns_roce_fill_cq(msg, &context))
+		goto err;
 
 	nla_nest_end(msg, table_attr);
-	kfree(context);
 
 	return 0;
 
-err_cancel_table:
-	nla_nest_cancel(msg, table_attr);
 err:
-	kfree(context);
-	return ret;
+	nla_nest_cancel(msg, table_attr);
+
+	return -EMSGSIZE;
 }
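
Note (not part of the applied patch): for reference, the restrack CQ fill
path after this change reads roughly as below, reconstructed from the
hns_roce_restrack.c hunks above. The CQ context now lives on the stack and
the query is dispatched through the generic hw ops table instead of the
removed hns_roce_dfx_hw indirection.

int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct hns_roce_v2_cq_context context;
	struct nlattr *table_attr;
	int ret;

	/* HW variants that do not provide a CQC query report no driver data */
	if (!hr_dev->hw->query_cqc)
		return -EINVAL;

	/* hns_roce_v2_query_cqc() for HIP08+, via the mailbox command added above */
	ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
	if (ret)
		return -EINVAL;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (hns_roce_fill_cq(msg, &context))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}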