[PATCH OLK-6.6 0/9] Some bug fix patches for RDMA/hns to olk-6.6

From: Xinghai Cen <cenxinghai@h-partners.com> Some bug fix patches for RDMA/hns to olk-6.6 Chengchang Tang (1): RDMA/hns: Fix a meaningless loop in free_buffer_pages_proc() Junxian Huang (7): RDMA/hns: Change mtr member to pointer in hns QP/CQ/MR/SRQ/EQ struct RDMA/hns: Move mtr_node into the mtr struct RDMA/hns: Fix delayed destruction of db not taking effect RDMA/hns: Fix delay-destruction mechanism not processing kernel db RDMA/hns: Fix mismatched kzalloc vs kvfree RDMA/hns: Fix DCA error path in alloc_wqe_buf() RDMA/hns: Reorder uctx deallocation Yuyu Li (1): RDMA/hns: Fix remove debugfs after device has been unregistered drivers/infiniband/hw/hns/hns_roce_bond.c | 2 +- drivers/infiniband/hw/hns/hns_roce_cq.c | 35 ++--- drivers/infiniband/hw/hns/hns_roce_db.c | 81 ++++++---- drivers/infiniband/hw/hns/hns_roce_dca.c | 8 +- drivers/infiniband/hw/hns/hns_roce_debugfs.c | 10 +- drivers/infiniband/hw/hns/hns_roce_debugfs.h | 3 +- drivers/infiniband/hw/hns/hns_roce_device.h | 69 ++++----- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 81 +++++----- drivers/infiniband/hw/hns/hns_roce_main.c | 23 ++- drivers/infiniband/hw/hns/hns_roce_mr.c | 145 ++++++++---------- drivers/infiniband/hw/hns/hns_roce_qp.c | 40 ++--- drivers/infiniband/hw/hns/hns_roce_restrack.c | 4 +- drivers/infiniband/hw/hns/hns_roce_srq.c | 63 +++----- 13 files changed, 269 insertions(+), 295 deletions(-) -- 2.33.0

driver inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBV8UW ---------------------------------------------------------------------- Change mtr member to pointer in hns QP/CQ/MR/SRQ/EQ struct to decouple the life cycle of mtr from these structs. This is the preparation for the following refactoring. No functional changes. Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com> Signed-off-by: wenglianfa <wenglianfa@huawei.com> Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com> --- drivers/infiniband/hw/hns/hns_roce_cq.c | 19 ++--- drivers/infiniband/hw/hns/hns_roce_dca.c | 6 +- drivers/infiniband/hw/hns/hns_roce_device.h | 21 ++--- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 81 ++++++++++--------- drivers/infiniband/hw/hns/hns_roce_mr.c | 63 +++++++++------ drivers/infiniband/hw/hns/hns_roce_qp.c | 17 ++-- drivers/infiniband/hw/hns/hns_roce_restrack.c | 4 +- drivers/infiniband/hw/hns/hns_roce_srq.c | 32 ++++---- 8 files changed, 131 insertions(+), 112 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 59f5abd868e5..8aebdfd20baa 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -190,7 +190,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) u64 mtts[MTT_MIN_COUNT] = {}; int ret; - ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts)); + ret = hns_roce_mtr_find(hr_dev, hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts)); if (ret) { ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret); return ret; @@ -211,7 +211,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) } ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, - hns_roce_get_mtr_ba(&hr_cq->mtr)); + hns_roce_get_mtr_ba(hr_cq->mtr)); if (ret) goto err_xa; @@ -261,7 +261,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, { struct ib_device *ibdev = &hr_dev->ib_dev; 
struct hns_roce_buf_attr buf_attr = {}; - int ret; + int ret = 0; hr_cq->mtr_node = kvmalloc(sizeof(*hr_cq->mtr_node), GFP_KERNEL); if (!hr_cq->mtr_node) @@ -272,10 +272,11 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num; buf_attr.region_count = 1; - ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr, - hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT, - udata, addr); - if (ret) { + hr_cq->mtr = hns_roce_mtr_create(hr_dev, &buf_attr, + hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT, + udata, addr); + if (IS_ERR(hr_cq->mtr)) { + ret = PTR_ERR(hr_cq->mtr); ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret); kvfree(hr_cq->mtr_node); hr_cq->mtr_node = NULL; @@ -287,9 +288,9 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) { if (hr_cq->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(hr_cq->mtr_node, hr_dev, &hr_cq->mtr); + hns_roce_add_unfree_mtr(hr_cq->mtr_node, hr_dev, hr_cq->mtr); } else { - hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr); + hns_roce_mtr_destroy(hr_dev, hr_cq->mtr); kvfree(hr_cq->mtr_node); hr_cq->mtr_node = NULL; } diff --git a/drivers/infiniband/hw/hns/hns_roce_dca.c b/drivers/infiniband/hw/hns/hns_roce_dca.c index eb408130329b..08712231ecd3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_dca.c +++ b/drivers/infiniband/hw/hns/hns_roce_dca.c @@ -327,7 +327,7 @@ int hns_roce_map_dca_safe_page(struct hns_roce_dev *hr_dev, for (i = 0; i < page_count; i++) pages[i] = hr_dev->dca_safe_page; - ret = hns_roce_mtr_map(hr_dev, &hr_qp->mtr, pages, page_count); + ret = hns_roce_mtr_map(hr_dev, hr_qp->mtr, pages, page_count); if (ret) ibdev_err(ibdev, "failed to map safe page for DCA, ret = %d.\n", ret); @@ -341,7 +341,7 @@ static int config_dca_qpc(struct hns_roce_dev *hr_dev, int page_count) { struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_mtr *mtr = 
&hr_qp->mtr; + struct hns_roce_mtr *mtr = hr_qp->mtr; int ret; ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count); @@ -701,7 +701,7 @@ static u32 alloc_buf_from_dca_mem(struct hns_roce_qp *hr_qp, buf_id = HNS_DCA_TO_BUF_ID(hr_qp->qpn, hr_qp->dca_cfg.attach_count); /* Assign pages from free pages */ - unit_pages = hr_qp->mtr.hem_cfg.is_direct ? buf_pages : 1; + unit_pages = hr_qp->mtr->hem_cfg.is_direct ? buf_pages : 1; alloc_pages = assign_dca_pages(ctx, buf_id, buf_pages, unit_pages); if (buf_pages != alloc_pages) { if (alloc_pages > 0) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 765bfc3d5579..827dfad86855 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -408,7 +408,7 @@ struct hns_roce_mr { int enabled; /* MR's active status */ int type; /* MR's register type */ u32 pbl_hop_num; /* multi-hop number */ - struct hns_roce_mtr pbl_mtr; + struct hns_roce_mtr *pbl_mtr; u32 npages; dma_addr_t *page_list; bool delayed_destroy_flag; @@ -506,7 +506,7 @@ struct hns_roce_db { struct hns_roce_cq { struct ib_cq ib_cq; - struct hns_roce_mtr mtr; + struct hns_roce_mtr *mtr; struct hns_roce_db db; u32 flags; spinlock_t lock; @@ -529,7 +529,7 @@ struct hns_roce_cq { }; struct hns_roce_idx_que { - struct hns_roce_mtr mtr; + struct hns_roce_mtr *mtr; u32 entry_shift; unsigned long *bitmap; u32 head; @@ -551,7 +551,7 @@ struct hns_roce_srq { refcount_t refcount; struct completion free; - struct hns_roce_mtr buf_mtr; + struct hns_roce_mtr *buf_mtr; u64 *wrid; struct hns_roce_idx_que idx_que; @@ -703,7 +703,7 @@ struct hns_roce_qp { enum ib_sig_type sq_signal_bits; struct hns_roce_wq sq; - struct hns_roce_mtr mtr; + struct hns_roce_mtr *mtr; struct hns_roce_dca_cfg dca_cfg; u32 buff_size; @@ -805,7 +805,7 @@ struct hns_roce_eq { int coalesce; int arm_st; int hop_num; - struct hns_roce_mtr mtr; + struct hns_roce_mtr *mtr; u16 eq_max_cnt; u32 eq_period; int 
shift; @@ -1345,10 +1345,11 @@ static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr) int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, u32 offset, u64 *mtt_buf, int mtt_max); -int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - struct hns_roce_buf_attr *buf_attr, - unsigned int page_shift, struct ib_udata *udata, - unsigned long user_addr); +struct hns_roce_mtr *hns_roce_mtr_create(struct hns_roce_dev *hr_dev, + struct hns_roce_buf_attr *buf_attr, + unsigned int ba_page_shift, + struct ib_udata *udata, + unsigned long user_addr); void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr); int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index be15b709c59c..e4839faac839 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -152,7 +152,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE); /* Data structure reuse may lead to confusion */ - pbl_ba = mr->pbl_mtr.hem_cfg.root_ba; + pbl_ba = mr->pbl_mtr->hem_cfg.root_ba; rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba)); rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba)); @@ -163,7 +163,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages); hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); + to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.buf_pg_shift)); hr_reg_clear(fseg, FRMR_BLK_MODE); } @@ -972,12 +972,12 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n) { - return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift); + return hns_roce_buf_offset(srq->buf_mtr->kmem, n << 
srq->wqe_shift); } static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n) { - return hns_roce_buf_offset(idx_que->mtr.kmem, + return hns_roce_buf_offset(idx_que->mtr->kmem, n << idx_que->entry_shift); } @@ -3487,7 +3487,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, int ret; int i; - ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, + ret = hns_roce_mtr_find(hr_dev, mr->pbl_mtr, 0, pages, min_t(int, ARRAY_SIZE(pages), mr->npages)); if (ret) { ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret); @@ -3498,7 +3498,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, for (i = 0; i < ARRAY_SIZE(pages); i++) pages[i] >>= MPT_PBL_BUF_ADDR_S; - pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr); + pbl_ba = hns_roce_get_mtr_ba(mr->pbl_mtr); mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> MPT_PBL_BA_ADDR_S); @@ -3511,7 +3511,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1])); hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); + to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.buf_pg_shift)); return 0; } @@ -3554,7 +3554,7 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num); hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); + to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.ba_pg_shift)); hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD); return set_mtpt_pbl(hr_dev, mpt_entry, mr); @@ -3598,7 +3598,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr) { - dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr); + dma_addr_t pbl_ba = hns_roce_get_mtr_ba(mr->pbl_mtr); struct hns_roce_v2_mpt_entry *mpt_entry; mpt_entry = mb_buf; @@ -3617,9 
+3617,9 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr) hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1); hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); + to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.ba_pg_shift)); hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); + to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.buf_pg_shift)); mpt_entry->pbl_size = cpu_to_le32(mr->npages); @@ -3757,7 +3757,7 @@ static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev) static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n) { - return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size); + return hns_roce_buf_offset(hr_cq->mtr->kmem, n * hr_cq->cqe_size); } static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n) @@ -3869,9 +3869,9 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H, upper_32_bits(to_hr_hw_page_addr(mtts[1]))); hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ, - to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift)); + to_hr_hw_page_shift(hr_cq->mtr->hem_cfg.ba_pg_shift)); hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ, - to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift)); + to_hr_hw_page_shift(hr_cq->mtr->hem_cfg.buf_pg_shift)); hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> CQC_CQE_BA_L_S); hr_reg_write(cq_context, CQC_CQE_BA_H, dma_handle >> CQC_CQE_BA_H_S); hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN, @@ -4727,7 +4727,7 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, int ret; /* Search qp buf's mtts */ - ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.wqe_offset, mtts, + ret = hns_roce_mtr_find(hr_dev, hr_qp->mtr, hr_qp->rq.wqe_offset, mtts, ARRAY_SIZE(mtts)); if (hr_qp->rq.wqe_cnt && ret) { ibdev_err(&hr_dev->ib_dev, @@ -4736,7 +4736,7 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, return ret; } - wqe_sge_ba = 
hns_roce_get_mtr_ba(&hr_qp->mtr); + wqe_sge_ba = hns_roce_get_mtr_ba(hr_qp->mtr); context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3); qpc_mask->wqe_sge_ba = 0; @@ -4767,11 +4767,11 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM); hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ, - to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift)); + to_hr_hw_page_shift(hr_qp->mtr->hem_cfg.ba_pg_shift)); hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ); hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ, - to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift)); + to_hr_hw_page_shift(hr_qp->mtr->hem_cfg.buf_pg_shift)); hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ); context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0])); @@ -4805,7 +4805,7 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev, int ret; /* search qp buf's mtts */ - ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.wqe_offset, + ret = hns_roce_mtr_find(hr_dev, hr_qp->mtr, hr_qp->sq.wqe_offset, &sq_cur_blk, 1); if (ret) { ibdev_err(ibdev, "failed to find QP(0x%lx) SQ WQE buf, ret = %d.\n", @@ -4813,7 +4813,7 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev, return ret; } if (hr_qp->sge.sge_cnt > 0) { - ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, + ret = hns_roce_mtr_find(hr_dev, hr_qp->mtr, hr_qp->sge.wqe_offset, &sge_cur_blk, 1); if (ret) { ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n", @@ -6177,7 +6177,7 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq, int ret; /* Get physical address of idx que buf */ - ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx, + ret = hns_roce_mtr_find(hr_dev, idx_que->mtr, 0, mtts_idx, ARRAY_SIZE(mtts_idx)); if (ret) { ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n", @@ -6185,7 +6185,7 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq, return ret; } - dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr); + dma_handle_idx = 
hns_roce_get_mtr_ba(idx_que->mtr); hr_reg_write(ctx, SRQC_IDX_HOP_NUM, to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt)); @@ -6195,9 +6195,9 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq, upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT)); hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ, - to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift)); + to_hr_hw_page_shift(idx_que->mtr->hem_cfg.ba_pg_shift)); hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ, - to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift)); + to_hr_hw_page_shift(idx_que->mtr->hem_cfg.buf_pg_shift)); hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L, to_hr_hw_page_addr(mtts_idx[0])); @@ -6224,7 +6224,7 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf) memset(ctx, 0, sizeof(*ctx)); /* Get the physical address of srq buf */ - ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe, + ret = hns_roce_mtr_find(hr_dev, srq->buf_mtr, 0, mtts_wqe, ARRAY_SIZE(mtts_wqe)); if (ret) { ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n", @@ -6232,7 +6232,7 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf) return ret; } - dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr); + dma_handle_wqe = hns_roce_get_mtr_ba(srq->buf_mtr); hr_reg_write(ctx, SRQC_SRQ_ST, 1); hr_reg_write_bool(ctx, SRQC_SRQ_TYPE, @@ -6254,9 +6254,9 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf) upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT)); hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ, - to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift)); + to_hr_hw_page_shift(srq->buf_mtr->hem_cfg.ba_pg_shift)); hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ, - to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift)); + to_hr_hw_page_shift(srq->buf_mtr->hem_cfg.buf_pg_shift)); if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) { hr_reg_enable(ctx, SRQC_DB_RECORD_EN); @@ -6609,7 +6609,7 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq) { struct 
hns_roce_aeqe *aeqe; - aeqe = hns_roce_buf_offset(eq->mtr.kmem, + aeqe = hns_roce_buf_offset(eq->mtr->kmem, (eq->cons_index & (eq->entries - 1)) * eq->eqe_size); @@ -6677,7 +6677,7 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq) { struct hns_roce_ceqe *ceqe; - ceqe = hns_roce_buf_offset(eq->mtr.kmem, + ceqe = hns_roce_buf_offset(eq->mtr->kmem, (eq->cons_index & (eq->entries - 1)) * eq->eqe_size); @@ -6917,7 +6917,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev, static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) { - hns_roce_mtr_destroy(hr_dev, &eq->mtr); + hns_roce_mtr_destroy(hr_dev, eq->mtr); } static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, @@ -6964,14 +6964,14 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, init_eq_config(hr_dev, eq); /* if not multi-hop, eqe buffer only use one trunk */ - ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, + ret = hns_roce_mtr_find(hr_dev, eq->mtr, 0, eqe_ba, ARRAY_SIZE(eqe_ba)); if (ret) { dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret); return ret; } - bt_ba = hns_roce_get_mtr_ba(&eq->mtr); + bt_ba = hns_roce_get_mtr_ba(eq->mtr); hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID); hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num); @@ -6981,9 +6981,9 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, hr_reg_write(eqc, EQC_EQN, eq->eqn); hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT); hr_reg_write(eqc, EQC_EQE_BA_PG_SZ, - to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift)); + to_hr_hw_page_shift(eq->mtr->hem_cfg.ba_pg_shift)); hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ, - to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift)); + to_hr_hw_page_shift(eq->mtr->hem_cfg.buf_pg_shift)); hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX); hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt); @@ -7016,7 +7016,7 @@ static int config_eqc(struct hns_roce_dev 
*hr_dev, struct hns_roce_eq *eq, static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) { struct hns_roce_buf_attr buf_attr = {}; - int err; + int err = 0; if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0) eq->hop_num = 0; @@ -7028,11 +7028,12 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) buf_attr.region[0].hopnum = eq->hop_num; buf_attr.region_count = 1; - err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr, - hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL, - 0); - if (err) + eq->mtr = hns_roce_mtr_create(hr_dev, &buf_attr, + hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL, 0); + if (IS_ERR(eq->mtr)) { + err = PTR_ERR(eq->mtr); dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err); + } return err; } diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 84d246fd13b6..7bf6a8481004 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -97,7 +97,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, struct ib_device *ibdev = &hr_dev->ib_dev; bool is_fast = mr->type == MR_TYPE_FRMR; struct hns_roce_buf_attr buf_attr = {}; - int err; + int err = 0; mr->mtr_node = kvmalloc(sizeof(*mr->mtr_node), GFP_KERNEL); if (!mr->mtr_node) @@ -117,17 +117,17 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, buf_attr.adaptive = !is_fast; buf_attr.type = MTR_PBL; - err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr, - hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT, - udata, start); - if (err) { + mr->pbl_mtr = hns_roce_mtr_create(hr_dev, &buf_attr, + hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT, udata, start); + if (IS_ERR(mr->pbl_mtr)) { + err = PTR_ERR(mr->pbl_mtr); ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err); kvfree(mr->mtr_node); mr->mtr_node = NULL; return err; } - mr->npages = 
mr->pbl_mtr->hem_cfg.buf_pg_count; mr->pbl_hop_num = buf_attr.region[0].hopnum; return err; @@ -136,9 +136,9 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) { if (mr->delayed_destroy_flag && mr->type != MR_TYPE_DMA) { - hns_roce_add_unfree_mtr(mr->mtr_node, hr_dev, &mr->pbl_mtr); + hns_roce_add_unfree_mtr(mr->mtr_node, hr_dev, mr->pbl_mtr); } else { - hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr); + hns_roce_mtr_destroy(hr_dev, mr->pbl_mtr); kvfree(mr->mtr_node); mr->mtr_node = NULL; } @@ -214,18 +214,22 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) { struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); struct hns_roce_mr *mr; - int ret; + int ret = -ENOMEM; mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); + mr->pbl_mtr = kvzalloc(sizeof(*mr->pbl_mtr), GFP_KERNEL); + if (!mr->pbl_mtr) + goto err_mtr; + mr->type = MR_TYPE_DMA; mr->pd = to_hr_pd(pd)->pdn; mr->access = acc; /* Allocate memory region key */ - hns_roce_hem_list_init(&mr->pbl_mtr.hem_list); + hns_roce_hem_list_init(&mr->pbl_mtr->hem_list); ret = alloc_mr_key(hr_dev, mr); if (ret) goto err_free; @@ -241,6 +245,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) free_mr_key(hr_dev, mr); err_free: + kvfree(mr->pbl_mtr); +err_mtr: kfree(mr); return ERR_PTR(ret); } @@ -444,7 +450,7 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr) { struct hns_roce_mr *mr = to_hr_mr(ibmr); - if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) { + if (likely(mr->npages < mr->pbl_mtr->hem_cfg.buf_pg_count)) { mr->page_list[mr->npages++] = addr; return 0; } @@ -459,7 +465,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_mr *mr = to_hr_mr(ibmr); - struct hns_roce_mtr *mtr = &mr->pbl_mtr; + struct 
hns_roce_mtr *mtr = mr->pbl_mtr; int ret, sg_num = 0; if (!IS_ALIGNED(sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) || @@ -468,7 +474,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, return sg_num; mr->npages = 0; - mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count, + mr->page_list = kvcalloc(mr->pbl_mtr->hem_cfg.buf_pg_count, sizeof(dma_addr_t), GFP_KERNEL); if (!mr->page_list) return sg_num; @@ -476,7 +482,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset_p, hns_roce_set_page); if (sg_num < 1) { ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n", - mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num); + mr->npages, mr->pbl_mtr->hem_cfg.buf_pg_count, sg_num); goto err_page_list; } @@ -489,7 +495,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret); sg_num = 0; } else { - mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size); + mr->pbl_mtr->hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size); } err_page_list: @@ -1146,20 +1152,25 @@ static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) * hns_roce_mtr_create - Create hns memory translate region. 
* * @hr_dev: RoCE device struct pointer - * @mtr: memory translate region * @buf_attr: buffer attribute for creating mtr * @ba_page_shift: page shift for multi-hop base address table * @udata: user space context, if it's NULL, means kernel space * @user_addr: userspace virtual address to start at */ -int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - struct hns_roce_buf_attr *buf_attr, - unsigned int ba_page_shift, struct ib_udata *udata, - unsigned long user_addr) +struct hns_roce_mtr *hns_roce_mtr_create(struct hns_roce_dev *hr_dev, + struct hns_roce_buf_attr *buf_attr, + unsigned int ba_page_shift, + struct ib_udata *udata, + unsigned long user_addr) { struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_mtr *mtr; int ret; + mtr = kvzalloc(sizeof(*mtr), GFP_KERNEL); + if (!mtr) + return ERR_PTR(-ENOMEM); + /* The caller has its own buffer list and invokes the hns_roce_mtr_map() * to finish the MTT configuration. */ @@ -1171,7 +1182,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, if (ret) { ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret); - return ret; + goto err_out; } ret = get_best_page_shift(hr_dev, mtr, buf_attr); @@ -1194,7 +1205,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, } if (buf_attr->mtt_only) - return 0; + return mtr; /* Write buffer's dma address to MTT */ ret = mtr_map_bufs(hr_dev, mtr); @@ -1203,14 +1214,15 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, goto err_alloc_mtt; } - return 0; + return mtr; err_alloc_mtt: mtr_free_mtt(hr_dev, mtr); err_init_buf: mtr_free_bufs(hr_dev, mtr); - - return ret; +err_out: + kvfree(mtr); + return ERR_PTR(ret); } void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) @@ -1220,6 +1232,7 @@ void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) /* free buffers */ mtr_free_bufs(hr_dev, mtr); + 
kvfree(mtr); } static void hns_roce_copy_mtr(struct hns_roce_mtr *new_mtr, struct hns_roce_mtr *old_mtr) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index e0ba0ab891dd..81cd20924a67 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -835,7 +835,7 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_udata *udata, unsigned long addr) { struct ib_device *ibdev = &hr_dev->ib_dev; - int ret; + int ret = 0; hr_qp->mtr_node = kvmalloc(sizeof(*hr_qp->mtr_node), GFP_KERNEL); if (!hr_qp->mtr_node) @@ -862,10 +862,11 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE; } - ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, buf_attr, - PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz, - udata, addr); - if (ret) { + hr_qp->mtr = hns_roce_mtr_create(hr_dev, buf_attr, + PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz, + udata, addr); + if (IS_ERR(hr_qp->mtr)) { + ret = PTR_ERR(hr_qp->mtr); ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret); if (dca_en) hns_roce_disable_dca(hr_dev, hr_qp, udata); @@ -882,9 +883,9 @@ static void free_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_udata *udata) { if (hr_qp->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(hr_qp->mtr_node, hr_dev, &hr_qp->mtr); + hns_roce_add_unfree_mtr(hr_qp->mtr_node, hr_dev, hr_qp->mtr); } else { - hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr); + hns_roce_mtr_destroy(hr_dev, hr_qp->mtr); kvfree(hr_qp->mtr_node); hr_qp->mtr_node = NULL; } @@ -1710,7 +1711,7 @@ static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset) if (unlikely(hr_qp->dca_cfg.buf_list)) return dca_buf_offset(&hr_qp->dca_cfg, offset); else - return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); + return hns_roce_buf_offset(hr_qp->mtr->kmem, offset); } void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, 
unsigned int n) diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index 34b8e4f85961..d99658ddf689 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -143,11 +143,11 @@ int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr) goto err; if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift", - hr_mr->pbl_mtr.hem_cfg.ba_pg_shift)) + hr_mr->pbl_mtr->hem_cfg.ba_pg_shift)) goto err; if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift", - hr_mr->pbl_mtr.hem_cfg.buf_pg_shift)) + hr_mr->pbl_mtr->hem_cfg.buf_pg_shift)) goto err; nla_nest_end(msg, table_attr); diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 0ab99aa9f9d5..c4161e76ef07 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -172,7 +172,7 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, struct hns_roce_idx_que *idx_que = &srq->idx_que; struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_buf_attr buf_attr = {}; - int ret; + int ret = 0; idx_que->mtr_node = kvmalloc(sizeof(*idx_que->mtr_node), GFP_KERNEL); if (!idx_que->mtr_node) @@ -186,10 +186,11 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num; buf_attr.region_count = 1; - ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr, - hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT, - udata, addr); - if (ret) { + idx_que->mtr = hns_roce_mtr_create(hr_dev, &buf_attr, + hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT, + udata, addr); + if (IS_ERR(idx_que->mtr)) { + ret = PTR_ERR(idx_que->mtr); ibdev_err(ibdev, "failed to alloc SRQ idx mtr, ret = %d.\n", ret); goto err_kvmalloc; @@ -209,7 +210,7 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, return 0; err_idx_mtr: - 
hns_roce_mtr_destroy(hr_dev, &idx_que->mtr); + hns_roce_mtr_destroy(hr_dev, idx_que->mtr); err_kvmalloc: kvfree(idx_que->mtr_node); idx_que->mtr_node = NULL; @@ -224,9 +225,9 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) bitmap_free(idx_que->bitmap); idx_que->bitmap = NULL; if (srq->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(idx_que->mtr_node, hr_dev, &idx_que->mtr); + hns_roce_add_unfree_mtr(idx_que->mtr_node, hr_dev, idx_que->mtr); } else { - hns_roce_mtr_destroy(hr_dev, &idx_que->mtr); + hns_roce_mtr_destroy(hr_dev, idx_que->mtr); kvfree(idx_que->mtr_node); idx_que->mtr_node = NULL; } @@ -238,7 +239,7 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, { struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_buf_attr buf_attr = {}; - int ret; + int ret = 0; srq->mtr_node = kvmalloc(sizeof(*srq->mtr_node), GFP_KERNEL); if (!srq->mtr_node) @@ -254,10 +255,11 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num; buf_attr.region_count = 1; - ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr, - hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT, - udata, addr); - if (ret) { + srq->buf_mtr = hns_roce_mtr_create(hr_dev, &buf_attr, + hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT, + udata, addr); + if (IS_ERR(srq->buf_mtr)) { + ret = PTR_ERR(srq->buf_mtr); ibdev_err(ibdev, "failed to alloc SRQ buf mtr, ret = %d.\n", ret); kvfree(srq->mtr_node); @@ -271,9 +273,9 @@ static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) { if (srq->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(srq->mtr_node, hr_dev, &srq->buf_mtr); + hns_roce_add_unfree_mtr(srq->mtr_node, hr_dev, srq->buf_mtr); } else { - hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr); + hns_roce_mtr_destroy(hr_dev, srq->buf_mtr); kvfree(srq->mtr_node); srq->mtr_node = NULL; } -- 2.33.0

driver inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBV8UW ---------------------------------------------------------------------- Previously the driver had to copy a new mtr and store it in mtr_node so that it could be found when freeing delayed-destruction resources, because the life cycle of the original mtr was over when QP/CQ/MR/SRQ structs were freed. But since the life cycle of mtr has been decoupled, the driver doesn't need to copy the mtr now. Move mtr_node into the mtr struct so that the mtr can be found with no need to copy a new one. Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com> Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com> --- drivers/infiniband/hw/hns/hns_roce_cq.c | 15 ++---- drivers/infiniband/hw/hns/hns_roce_device.h | 14 +----- drivers/infiniband/hw/hns/hns_roce_mr.c | 54 ++++----------------- drivers/infiniband/hw/hns/hns_roce_qp.c | 17 ++----- drivers/infiniband/hw/hns/hns_roce_srq.c | 33 +++---------- 5 files changed, 24 insertions(+), 109 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 8aebdfd20baa..b0fdf073519c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -263,10 +263,6 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, struct hns_roce_buf_attr buf_attr = {}; int ret = 0; - hr_cq->mtr_node = kvmalloc(sizeof(*hr_cq->mtr_node), GFP_KERNEL); - if (!hr_cq->mtr_node) - return -ENOMEM; - buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT; buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size; buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num; @@ -278,8 +274,6 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, if (IS_ERR(hr_cq->mtr)) { ret = PTR_ERR(hr_cq->mtr); ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret); - kvfree(hr_cq->mtr_node); - hr_cq->mtr_node = NULL; } return ret; @@ -287,13 
+281,10 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) { - if (hr_cq->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(hr_cq->mtr_node, hr_dev, hr_cq->mtr); - } else { + if (hr_cq->delayed_destroy_flag) + hns_roce_add_unfree_mtr(hr_dev, hr_cq->mtr); + else hns_roce_mtr_destroy(hr_dev, hr_cq->mtr); - kvfree(hr_cq->mtr_node); - hr_cq->mtr_node = NULL; - } } static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 827dfad86855..f77d59958ef5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -368,6 +368,7 @@ struct hns_roce_mtr { struct ib_umem *umem; /* user space buffer */ struct hns_roce_buf *kmem; /* kernel space buffer */ struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */ + struct list_head node; /* list node for delay-destruction */ }; /* DCA config */ @@ -393,11 +394,6 @@ struct hns_roce_mw { u32 pbl_buf_pg_sz; }; -struct hns_roce_mtr_node { - struct hns_roce_mtr mtr; - struct list_head list; -}; - struct hns_roce_mr { struct ib_mr ibmr; u64 iova; /* MR's virtual original addr */ @@ -412,7 +408,6 @@ struct hns_roce_mr { u32 npages; dma_addr_t *page_list; bool delayed_destroy_flag; - struct hns_roce_mtr_node *mtr_node; }; struct hns_roce_mr_table { @@ -525,7 +520,6 @@ struct hns_roce_cq { int is_armed; /* cq is armed */ struct list_head node; /* all armed cqs are on a list */ bool delayed_destroy_flag; - struct hns_roce_mtr_node *mtr_node; }; struct hns_roce_idx_que { @@ -534,7 +528,6 @@ struct hns_roce_idx_que { unsigned long *bitmap; u32 head; u32 tail; - struct hns_roce_mtr_node *mtr_node; }; struct hns_roce_srq { @@ -561,7 +554,6 @@ struct hns_roce_srq { struct hns_roce_db rdb; u32 cap_flags; bool delayed_destroy_flag; - struct 
hns_roce_mtr_node *mtr_node; }; struct hns_roce_uar_table { @@ -742,7 +734,6 @@ struct hns_roce_qp { u8 priority; enum hns_roce_cong_type cong_type; bool delayed_destroy_flag; - struct hns_roce_mtr_node *mtr_node; spinlock_t flush_lock; struct hns_roce_dip *dip; }; @@ -1472,8 +1463,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, void hns_roce_add_unfree_umem(struct hns_roce_user_db_page *user_page, struct hns_roce_dev *hr_dev); void hns_roce_free_unfree_umem(struct hns_roce_dev *hr_dev); -void hns_roce_add_unfree_mtr(struct hns_roce_mtr_node *pos, - struct hns_roce_dev *hr_dev, +void hns_roce_add_unfree_mtr(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr); void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev); int hns_roce_alloc_scc_param(struct hns_roce_dev *hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 7bf6a8481004..737a7c28acab 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -99,10 +99,6 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, struct hns_roce_buf_attr buf_attr = {}; int err = 0; - mr->mtr_node = kvmalloc(sizeof(*mr->mtr_node), GFP_KERNEL); - if (!mr->mtr_node) - return -ENOMEM; - mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num; buf_attr.page_shift = is_fast ? 
PAGE_SHIFT : hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT; @@ -122,8 +118,6 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, if (IS_ERR(mr->pbl_mtr)) { err = PTR_ERR(mr->pbl_mtr); ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err); - kvfree(mr->mtr_node); - mr->mtr_node = NULL; return err; } @@ -135,13 +129,10 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) { - if (mr->delayed_destroy_flag && mr->type != MR_TYPE_DMA) { - hns_roce_add_unfree_mtr(mr->mtr_node, hr_dev, mr->pbl_mtr); - } else { + if (mr->delayed_destroy_flag && mr->type != MR_TYPE_DMA) + hns_roce_add_unfree_mtr(hr_dev, mr->pbl_mtr); + else hns_roce_mtr_destroy(hr_dev, mr->pbl_mtr); - kvfree(mr->mtr_node); - mr->mtr_node = NULL; - } } static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) @@ -1235,49 +1226,22 @@ void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) kvfree(mtr); } -static void hns_roce_copy_mtr(struct hns_roce_mtr *new_mtr, struct hns_roce_mtr *old_mtr) -{ - struct list_head *new_head, *old_head; - int i, j; - - memcpy(new_mtr, old_mtr, sizeof(*old_mtr)); - - for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++) - for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) { - new_head = &new_mtr->hem_list.mid_bt[i][j]; - old_head = &old_mtr->hem_list.mid_bt[i][j]; - list_replace(old_head, new_head); - } - - new_head = &new_mtr->hem_list.root_bt; - old_head = &old_mtr->hem_list.root_bt; - list_replace(old_head, new_head); - - new_head = &new_mtr->hem_list.btm_bt; - old_head = &old_mtr->hem_list.btm_bt; - list_replace(old_head, new_head); -} - -void hns_roce_add_unfree_mtr(struct hns_roce_mtr_node *pos, - struct hns_roce_dev *hr_dev, +void hns_roce_add_unfree_mtr(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) { - hns_roce_copy_mtr(&pos->mtr, mtr); - mutex_lock(&hr_dev->mtr_unfree_list_mutex); - 
list_add_tail(&pos->list, &hr_dev->mtr_unfree_list); + list_add_tail(&mtr->node, &hr_dev->mtr_unfree_list); mutex_unlock(&hr_dev->mtr_unfree_list_mutex); } void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev) { - struct hns_roce_mtr_node *pos, *next; + struct hns_roce_mtr *mtr, *next; mutex_lock(&hr_dev->mtr_unfree_list_mutex); - list_for_each_entry_safe(pos, next, &hr_dev->mtr_unfree_list, list) { - list_del(&pos->list); - hns_roce_mtr_destroy(hr_dev, &pos->mtr); - kvfree(pos); + list_for_each_entry_safe(mtr, next, &hr_dev->mtr_unfree_list, node) { + list_del(&mtr->node); + hns_roce_mtr_destroy(hr_dev, mtr); } mutex_unlock(&hr_dev->mtr_unfree_list_mutex); } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 81cd20924a67..828bf9e59e27 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -837,18 +837,12 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_device *ibdev = &hr_dev->ib_dev; int ret = 0; - hr_qp->mtr_node = kvmalloc(sizeof(*hr_qp->mtr_node), GFP_KERNEL); - if (!hr_qp->mtr_node) - return -ENOMEM; - if (dca_en) { /* DCA must be enabled after the buffer attr is configured. 
*/ ret = hns_roce_enable_dca(hr_dev, hr_qp, udata); if (ret) { ibdev_err(ibdev, "failed to enable DCA, ret = %d.\n", ret); - kvfree(hr_qp->mtr_node); - hr_qp->mtr_node = NULL; return ret; } @@ -870,8 +864,6 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret); if (dca_en) hns_roce_disable_dca(hr_dev, hr_qp, udata); - kvfree(hr_qp->mtr_node); - hr_qp->mtr_node = NULL; } else if (dca_en) { ret = hns_roce_map_dca_safe_page(hr_dev, hr_qp); } @@ -882,13 +874,10 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, static void free_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_udata *udata) { - if (hr_qp->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(hr_qp->mtr_node, hr_dev, hr_qp->mtr); - } else { + if (hr_qp->delayed_destroy_flag) + hns_roce_add_unfree_mtr(hr_dev, hr_qp->mtr); + else hns_roce_mtr_destroy(hr_dev, hr_qp->mtr); - kvfree(hr_qp->mtr_node); - hr_qp->mtr_node = NULL; - } if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DYNAMIC_CTX_ATTACH) hns_roce_disable_dca(hr_dev, hr_qp, udata); diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index c4161e76ef07..965ed2d682ad 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -174,10 +174,6 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, struct hns_roce_buf_attr buf_attr = {}; int ret = 0; - idx_que->mtr_node = kvmalloc(sizeof(*idx_que->mtr_node), GFP_KERNEL); - if (!idx_que->mtr_node) - return -ENOMEM; - srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ); buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_SHIFT; @@ -193,7 +189,7 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, ret = PTR_ERR(idx_que->mtr); ibdev_err(ibdev, "failed to alloc SRQ idx mtr, ret = %d.\n", ret); - goto err_kvmalloc; + 
return ret; } if (!udata) { @@ -211,9 +207,6 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, return 0; err_idx_mtr: hns_roce_mtr_destroy(hr_dev, idx_que->mtr); -err_kvmalloc: - kvfree(idx_que->mtr_node); - idx_que->mtr_node = NULL; return ret; } @@ -224,13 +217,10 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) bitmap_free(idx_que->bitmap); idx_que->bitmap = NULL; - if (srq->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(idx_que->mtr_node, hr_dev, idx_que->mtr); - } else { + if (srq->delayed_destroy_flag) + hns_roce_add_unfree_mtr(hr_dev, idx_que->mtr); + else hns_roce_mtr_destroy(hr_dev, idx_que->mtr); - kvfree(idx_que->mtr_node); - idx_que->mtr_node = NULL; - } } static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, @@ -241,10 +231,6 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_buf_attr buf_attr = {}; int ret = 0; - srq->mtr_node = kvmalloc(sizeof(*srq->mtr_node), GFP_KERNEL); - if (!srq->mtr_node) - return -ENOMEM; - srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE, HNS_ROCE_SGE_SIZE * srq->max_gs))); @@ -262,8 +248,6 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, ret = PTR_ERR(srq->buf_mtr); ibdev_err(ibdev, "failed to alloc SRQ buf mtr, ret = %d.\n", ret); - kvfree(srq->mtr_node); - srq->mtr_node = NULL; } return ret; @@ -272,13 +256,10 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) { - if (srq->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(srq->mtr_node, hr_dev, srq->buf_mtr); - } else { + if (srq->delayed_destroy_flag) + hns_roce_add_unfree_mtr(hr_dev, srq->buf_mtr); + else hns_roce_mtr_destroy(hr_dev, srq->buf_mtr); - kvfree(srq->mtr_node); - srq->mtr_node = NULL; - } } static int alloc_srq_wrid(struct hns_roce_srq *srq) -- 2.33.0

driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IBV8UW ---------------------------------------------------------------------- Driver allocates and frees db in a unit of page. One db page will be shared by multiple dbs. Currently the delayed destruction of db only depends on the delayed_unmap_flag of the db itself. It means if this flag of the last db in a page is not set, this page will still be freed, and the delayed-destruction mechanism won't take effect even though the flag of some previous dbs may be set. A db page is associated with a umem_node. Add a flag to umem_node to indicate whether this page should be delayed-destroyed. Fixes: 04c5d76e4f15 ("RDMA/hns: Fix simultaneous reset and resource deregistration") Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com> Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com> --- drivers/infiniband/hw/hns/hns_roce_db.c | 7 +++++-- drivers/infiniband/hw/hns/hns_roce_device.h | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index 5adc2f1fa319..6e276a45b3ea 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c @@ -68,17 +68,20 @@ void hns_roce_db_unmap_user(struct hns_roce_ucontext *context, bool delayed_unmap_flag) { struct hns_roce_dev *hr_dev = to_hr_dev(context->ibucontext.device); + struct hns_roce_umem_node *umem_node = db->u.user_page->umem_node; mutex_lock(&context->page_mutex); + umem_node->delayed_unmap_flag |= delayed_unmap_flag; + refcount_dec(&db->u.user_page->refcount); if (refcount_dec_if_one(&db->u.user_page->refcount)) { list_del(&db->u.user_page->list); - if (delayed_unmap_flag) { + if (umem_node->delayed_unmap_flag) { hns_roce_add_unfree_umem(db->u.user_page, hr_dev); } else { ib_umem_release(db->u.user_page->umem); - kvfree(db->u.user_page->umem_node); + kvfree(umem_node); } kfree(db->u.user_page); } diff --git 
a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index f77d59958ef5..3a58fd1b1364 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -477,6 +477,7 @@ struct hns_roce_db_pgdir { struct hns_roce_umem_node { struct ib_umem *umem; struct list_head list; + bool delayed_unmap_flag; }; struct hns_roce_user_db_page { -- 2.33.0

driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IBV8UW ---------------------------------------------------------------------- Kernel dbs are not processed by delayed-destruction mechanism. This may lead to HW UAF described in the fixes commit. Expand the hns_roce_umem_node to hns_roce_db_pg_node with kernel db information. This struct is now used by both userspace and kernel db pages. Fixes: 04c5d76e4f15 ("RDMA/hns: Fix simultaneous reset and resource deregistration") Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com> Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com> --- drivers/infiniband/hw/hns/hns_roce_cq.c | 3 +- drivers/infiniband/hw/hns/hns_roce_db.c | 82 +++++++++++++-------- drivers/infiniband/hw/hns/hns_roce_device.h | 35 +++++---- drivers/infiniband/hw/hns/hns_roce_main.c | 10 +-- drivers/infiniband/hw/hns/hns_roce_mr.c | 30 ++++---- drivers/infiniband/hw/hns/hns_roce_qp.c | 3 +- drivers/infiniband/hw/hns/hns_roce_srq.c | 2 +- 7 files changed, 97 insertions(+), 68 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index b0fdf073519c..a18d379d401c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -338,7 +338,8 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, hns_roce_db_unmap_user(uctx, &hr_cq->db, hr_cq->delayed_destroy_flag); } else { - hns_roce_free_db(hr_dev, &hr_cq->db); + hns_roce_free_db(hr_dev, &hr_cq->db, + hr_cq->delayed_destroy_flag); } } diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index 6e276a45b3ea..d9b217891b93 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c @@ -12,6 +12,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt, { unsigned long page_addr = virt & PAGE_MASK; struct hns_roce_user_db_page *page; + struct ib_umem 
*umem; unsigned int offset; int ret = 0; @@ -29,32 +30,33 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt, refcount_set(&page->refcount, 1); page->user_virt = page_addr; - page->umem = ib_umem_get(context->ibucontext.device, page_addr, - PAGE_SIZE, 0); - if (IS_ERR(page->umem)) { - ret = PTR_ERR(page->umem); + page->db_node = kvzalloc(sizeof(*page->db_node), GFP_KERNEL); + if (!page->db_node) { + ret = -ENOMEM; goto err_page; } - page->umem_node = kvmalloc(sizeof(*page->umem_node), GFP_KERNEL); - if (!page->umem_node) { - ret = -ENOMEM; - goto err_umem; + + umem = ib_umem_get(context->ibucontext.device, page_addr, PAGE_SIZE, 0); + if (IS_ERR(umem)) { + ret = PTR_ERR(umem); + goto err_dbnode; } + page->db_node->umem = umem; list_add(&page->list, &context->page_list); found: offset = virt - page_addr; - db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset; - db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset; + db->dma = sg_dma_address(page->db_node->umem->sgt_append.sgt.sgl) + offset; + db->virt_addr = sg_virt(page->db_node->umem->sgt_append.sgt.sgl) + offset; db->u.user_page = page; refcount_inc(&page->refcount); mutex_unlock(&context->page_mutex); return 0; -err_umem: - ib_umem_release(page->umem); +err_dbnode: + kvfree(page->db_node); err_page: kfree(page); err_out: @@ -68,20 +70,20 @@ void hns_roce_db_unmap_user(struct hns_roce_ucontext *context, bool delayed_unmap_flag) { struct hns_roce_dev *hr_dev = to_hr_dev(context->ibucontext.device); - struct hns_roce_umem_node *umem_node = db->u.user_page->umem_node; + struct hns_roce_db_pg_node *db_node = db->u.user_page->db_node; mutex_lock(&context->page_mutex); - umem_node->delayed_unmap_flag |= delayed_unmap_flag; + db_node->delayed_unmap_flag |= delayed_unmap_flag; refcount_dec(&db->u.user_page->refcount); if (refcount_dec_if_one(&db->u.user_page->refcount)) { list_del(&db->u.user_page->list); - if (umem_node->delayed_unmap_flag) { - 
hns_roce_add_unfree_umem(db->u.user_page, hr_dev); + if (db_node->delayed_unmap_flag) { + hns_roce_add_unfree_db(db_node, hr_dev); } else { - ib_umem_release(db->u.user_page->umem); - kvfree(umem_node); + ib_umem_release(db_node->umem); + kvfree(db_node); } kfree(db->u.user_page); } @@ -93,6 +95,8 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir( struct device *dma_device) { struct hns_roce_db_pgdir *pgdir; + dma_addr_t db_dma; + u32 *page; pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); if (!pgdir) @@ -102,14 +106,24 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir( HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT); pgdir->bits[0] = pgdir->order0; pgdir->bits[1] = pgdir->order1; - pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE, - &pgdir->db_dma, GFP_KERNEL); - if (!pgdir->page) { - kfree(pgdir); - return NULL; - } + pgdir->db_node = kvzalloc(sizeof(*pgdir->db_node), GFP_KERNEL); + if (!pgdir->db_node) + goto err_node; + + page = dma_alloc_coherent(dma_device, PAGE_SIZE, &db_dma, GFP_KERNEL); + if (!page) + goto err_dma; + + pgdir->db_node->kdb.page = page; + pgdir->db_node->kdb.db_dma = db_dma; return pgdir; + +err_dma: + kvfree(pgdir->db_node); +err_node: + kfree(pgdir); + return NULL; } static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir, @@ -136,8 +150,8 @@ static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir, db->u.pgdir = pgdir; db->index = i; - db->db_record = pgdir->page + db->index; - db->dma = pgdir->db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE; + db->db_record = pgdir->db_node->kdb.page + db->index; + db->dma = pgdir->db_node->kdb.db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE; db->order = order; return 0; @@ -172,13 +186,17 @@ int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db, return ret; } -void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db) +void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db, + bool delayed_unmap_flag) { + 
struct hns_roce_db_pg_node *db_node = db->u.pgdir->db_node; unsigned long o; unsigned long i; mutex_lock(&hr_dev->pgdir_mutex); + db_node->delayed_unmap_flag |= delayed_unmap_flag; + o = db->order; i = db->index; @@ -192,9 +210,15 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db) if (bitmap_full(db->u.pgdir->order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) { - dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page, - db->u.pgdir->db_dma); list_del(&db->u.pgdir->list); + if (db_node->delayed_unmap_flag) { + hns_roce_add_unfree_db(db_node, hr_dev); + } else { + dma_free_coherent(hr_dev->dev, PAGE_SIZE, + db_node->kdb.page, + db_node->kdb.db_dma); + kvfree(db_node); + } kfree(db->u.pgdir); } diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 3a58fd1b1364..9b81f2974c82 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -465,27 +465,29 @@ struct hns_roce_buf { unsigned int page_shift; }; +struct hns_roce_db_pg_node { + struct list_head list; + struct ib_umem *umem; + struct { + u32 *page; + dma_addr_t db_dma; + } kdb; + bool delayed_unmap_flag; +}; + struct hns_roce_db_pgdir { struct list_head list; DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE); DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT); unsigned long *bits[HNS_ROCE_DB_TYPE_COUNT]; - u32 *page; - dma_addr_t db_dma; -}; - -struct hns_roce_umem_node { - struct ib_umem *umem; - struct list_head list; - bool delayed_unmap_flag; + struct hns_roce_db_pg_node *db_node; }; struct hns_roce_user_db_page { struct list_head list; - struct ib_umem *umem; unsigned long user_virt; refcount_t refcount; - struct hns_roce_umem_node *umem_node; + struct hns_roce_db_pg_node *db_node; }; struct hns_roce_db { @@ -1154,8 +1156,8 @@ struct hns_roce_dev { struct list_head mtr_unfree_list; /* list of unfree mtr on this dev */ struct mutex mtr_unfree_list_mutex; /* 
protect mtr_unfree_list */ - struct list_head umem_unfree_list; /* list of unfree umem on this dev */ - struct mutex umem_unfree_list_mutex; /* protect umem_unfree_list */ + struct list_head db_unfree_list; /* list of unfree db on this dev */ + struct mutex db_unfree_list_mutex; /* protect db_unfree_list */ void *dca_safe_buf; dma_addr_t dca_safe_page; @@ -1438,7 +1440,8 @@ void hns_roce_db_unmap_user(struct hns_roce_ucontext *context, bool delayed_unmap_flag); int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db, int order); -void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db); +void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db, + bool delayed_unmap_flag); void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn); void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type); @@ -1461,9 +1464,9 @@ struct hns_user_mmap_entry * hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, size_t length, enum hns_roce_mmap_type mmap_type); -void hns_roce_add_unfree_umem(struct hns_roce_user_db_page *user_page, - struct hns_roce_dev *hr_dev); -void hns_roce_free_unfree_umem(struct hns_roce_dev *hr_dev); +void hns_roce_add_unfree_db(struct hns_roce_db_pg_node *db_node, + struct hns_roce_dev *hr_dev); +void hns_roce_free_unfree_db(struct hns_roce_dev *hr_dev); void hns_roce_add_unfree_mtr(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr); void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 8f110e64e601..a0b9b9f1ff2c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -1235,7 +1235,7 @@ static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev) hns_roce_cleanup_dca(hr_dev); hns_roce_cleanup_bitmap(hr_dev); - mutex_destroy(&hr_dev->umem_unfree_list_mutex); + 
mutex_destroy(&hr_dev->db_unfree_list_mutex); mutex_destroy(&hr_dev->mtr_unfree_list_mutex); mutex_destroy(&hr_dev->uctx_list_mutex); if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || @@ -1264,8 +1264,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) INIT_LIST_HEAD(&hr_dev->mtr_unfree_list); mutex_init(&hr_dev->mtr_unfree_list_mutex); - INIT_LIST_HEAD(&hr_dev->umem_unfree_list); - mutex_init(&hr_dev->umem_unfree_list_mutex); + INIT_LIST_HEAD(&hr_dev->db_unfree_list); + mutex_init(&hr_dev->db_unfree_list_mutex); if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) { @@ -1309,7 +1309,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) mutex_destroy(&hr_dev->pgdir_mutex); - mutex_destroy(&hr_dev->umem_unfree_list_mutex); + mutex_destroy(&hr_dev->db_unfree_list_mutex); mutex_destroy(&hr_dev->mtr_unfree_list_mutex); mutex_destroy(&hr_dev->uctx_list_mutex); @@ -1503,7 +1503,7 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev, bool bond_cleanup) if (hr_dev->hw->hw_exit) hr_dev->hw->hw_exit(hr_dev); - hns_roce_free_unfree_umem(hr_dev); + hns_roce_free_unfree_db(hr_dev); hns_roce_free_unfree_mtr(hr_dev); hns_roce_teardown_hca(hr_dev); hns_roce_cleanup_hem(hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 737a7c28acab..b3a1e5b4cd8d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -1246,27 +1246,27 @@ void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev) mutex_unlock(&hr_dev->mtr_unfree_list_mutex); } -void hns_roce_add_unfree_umem(struct hns_roce_user_db_page *user_page, - struct hns_roce_dev *hr_dev) +void hns_roce_add_unfree_db(struct hns_roce_db_pg_node *db_node, + struct hns_roce_dev *hr_dev) { - struct hns_roce_umem_node *pos = user_page->umem_node; - 
- pos->umem = user_page->umem; - - mutex_lock(&hr_dev->umem_unfree_list_mutex); - list_add_tail(&pos->list, &hr_dev->umem_unfree_list); - mutex_unlock(&hr_dev->umem_unfree_list_mutex); + mutex_lock(&hr_dev->db_unfree_list_mutex); + list_add_tail(&db_node->list, &hr_dev->db_unfree_list); + mutex_unlock(&hr_dev->db_unfree_list_mutex); } -void hns_roce_free_unfree_umem(struct hns_roce_dev *hr_dev) +void hns_roce_free_unfree_db(struct hns_roce_dev *hr_dev) { - struct hns_roce_umem_node *pos, *next; + struct hns_roce_db_pg_node *pos, *next; - mutex_lock(&hr_dev->umem_unfree_list_mutex); - list_for_each_entry_safe(pos, next, &hr_dev->umem_unfree_list, list) { + mutex_lock(&hr_dev->db_unfree_list_mutex); + list_for_each_entry_safe(pos, next, &hr_dev->db_unfree_list, list) { list_del(&pos->list); - ib_umem_release(pos->umem); + if (pos->umem) + ib_umem_release(pos->umem); + else + dma_free_coherent(hr_dev->dev, PAGE_SIZE, + pos->kdb.page, pos->kdb.db_dma); kvfree(pos); } - mutex_unlock(&hr_dev->umem_unfree_list_mutex); + mutex_unlock(&hr_dev->db_unfree_list_mutex); } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 828bf9e59e27..b888955ec0b3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1131,7 +1131,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, qp_user_mmap_entry_remove(hr_qp); } else { if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) - hns_roce_free_db(hr_dev, &hr_qp->rdb); + hns_roce_free_db(hr_dev, &hr_qp->rdb, + hr_qp->delayed_destroy_flag); } } diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 965ed2d682ad..1262e9535c22 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -431,7 +431,7 @@ static void free_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, hns_roce_db_unmap_user(uctx, &srq->rdb, 
srq->delayed_destroy_flag); } else { - hns_roce_free_db(hr_dev, &srq->rdb); + hns_roce_free_db(hr_dev, &srq->rdb, srq->delayed_destroy_flag); } } -- 2.33.0

driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IBV8UW ---------------------------------------------------------------------- die_info is allocated with kzalloc() and should be freed with kfree() instead of kvfree(). Fixes: 2004b3f9092a ("RDMA/hns: Support RoCE bonding") Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com> Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com> --- drivers/infiniband/hw/hns/hns_roce_bond.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_bond.c b/drivers/infiniband/hw/hns/hns_roce_bond.c index f76667335189..1f5eb0eca246 100644 --- a/drivers/infiniband/hw/hns/hns_roce_bond.c +++ b/drivers/infiniband/hw/hns/hns_roce_bond.c @@ -606,7 +606,7 @@ static struct hns_roce_die_info *alloc_die_info(int bus_num) static void dealloc_die_info(struct hns_roce_die_info *die_info, u8 bus_num) { xa_erase(&roce_bond_xa, bus_num); - kvfree(die_info); + kfree(die_info); } static int alloc_bond_id(struct hns_roce_bond_group *bond_grp) -- 2.33.0

driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IBV8UW ---------------------------------------------------------------------- When hns_roce_map_dca_safe_page() fails in alloc_wqe_buf(), hns_roce_disable_dca() should be called in error path. Fixes: e519f15b907a ("RDMA/hns: Fix possible RAS when DCA is not attached") Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com> Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com> --- drivers/infiniband/hw/hns/hns_roce_qp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index b888955ec0b3..90da0080a93e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -862,12 +862,13 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, if (IS_ERR(hr_qp->mtr)) { ret = PTR_ERR(hr_qp->mtr); ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret); - if (dca_en) - hns_roce_disable_dca(hr_dev, hr_qp, udata); } else if (dca_en) { ret = hns_roce_map_dca_safe_page(hr_dev, hr_qp); } + if (ret && dca_en) + hns_roce_disable_dca(hr_dev, hr_qp, udata); + return ret; } -- 2.33.0

From: Chengchang Tang <tangchengchang@huawei.com> driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IBV8UW ---------------------------------------------------------------------- The iterated element does not change, making the loop meaningless. This patch fixes it. Fixes: 10bb3b802412 ("RDMA/hns: Add method for attaching WQE buffer") Signed-off-by: Yuyu Li <liyuyu6@huawei.com> Signed-off-by: Chengchang Tang <tangchengchang@huawei.com> Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com> --- drivers/infiniband/hw/hns/hns_roce_dca.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_dca.c b/drivers/infiniband/hw/hns/hns_roce_dca.c index 08712231ecd3..53184e1c71b7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_dca.c +++ b/drivers/infiniband/hw/hns/hns_roce_dca.c @@ -899,7 +899,7 @@ static int free_buffer_pages_proc(struct dca_mem *mem, int index, void *param) } for (; changed && i < mem->page_count; i++) - if (dca_page_is_free(state)) + if (dca_page_is_free(&mem->states[i])) free_pages++; if (changed && free_pages == mem->page_count) -- 2.33.0

driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IBV8UW ---------------------------------------------------------------------- Reorder hns_roce_dealloc_ucontext() to fit with the reverse order of allocation. Fixes: 640cb0880216 ("RDMA/hns: Add debugfs support for DCA") Fixes: 5b4694150feb ("RDMA/hns: Use one CQ bank per context") Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com> Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com> --- drivers/infiniband/hw/hns/hns_roce_main.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index a0b9b9f1ff2c..187bf1bf1bfc 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -632,21 +632,20 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext) struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext); struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device); + hns_roce_put_cq_bankid_for_uctx(context); + hns_roce_unregister_uctx_debugfs(context); + mutex_lock(&hr_dev->uctx_list_mutex); list_del(&context->list); mutex_unlock(&hr_dev->uctx_list_mutex); + hns_roce_unregister_udca(hr_dev, context); + hns_roce_dealloc_reset_entry(context); if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) mutex_destroy(&context->page_mutex); - hns_roce_put_cq_bankid_for_uctx(context); - hns_roce_unregister_uctx_debugfs(context); - - hns_roce_unregister_udca(hr_dev, context); - hns_roce_dealloc_uar_entry(context); - hns_roce_dealloc_reset_entry(context); ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx); } -- 2.33.0

From: Yuyu Li <liyuyu6@huawei.com> driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IBV8UW ---------------------------------------------------------------------- hns_roce_unregister_device() will be problematic if it is called before hns_roce_unregister_debugfs(): hns_roce_unregister_device() will free the ib_device, and there will be a risk if debugfs still uses the ib_device. Conversely, if hns_roce_unregister_debugfs() is called before hns_roce_unregister_device(), when the driver is uninstalled, the former will delete the entire debugfs directory, and the latter will then try to destroy all the debugfs subdirectories of uctx, and an error will occur because the debugfs directory of uctx no longer exists. Now put hns_roce_unregister_debugfs() before hns_roce_unregister_device() and leave all the debugfs roots NULL after hns_roce_unregister_debugfs(), so that when hns_roce_unregister_device() is called afterwards, if the debugfs directory of uctx is found to be empty, it returns directly without throwing an error. The above problems are solved by this patch. 
Fixes: 640cb0880216 ("RDMA/hns: Add debugfs support for DCA") Signed-off-by: Yuyu Li <liyuyu6@huawei.com> Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com> --- drivers/infiniband/hw/hns/hns_roce_debugfs.c | 10 ++++++++-- drivers/infiniband/hw/hns/hns_roce_debugfs.h | 3 ++- drivers/infiniband/hw/hns/hns_roce_main.c | 4 ++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.c b/drivers/infiniband/hw/hns/hns_roce_debugfs.c index 7023c3cefaa7..756e43cace43 100644 --- a/drivers/infiniband/hw/hns/hns_roce_debugfs.c +++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.c @@ -486,9 +486,14 @@ void hns_roce_register_uctx_debugfs(struct hns_roce_dev *hr_dev, hr_dev, uctx); } -void hns_roce_unregister_uctx_debugfs(struct hns_roce_ucontext *uctx) +void hns_roce_unregister_uctx_debugfs(struct hns_roce_dev *hr_dev, + struct hns_roce_ucontext *uctx) { - debugfs_remove_recursive(uctx->dca_dbgfs.root); + struct hns_dca_debugfs *dca_dbgfs = &hr_dev->dbgfs.dca_root; + char name[DCA_CTX_PID_LEN]; + + snprintf(name, sizeof(name), "%d", uctx->pid); + debugfs_lookup_and_remove(name, dca_dbgfs->root); } /* debugfs for device */ @@ -508,6 +513,7 @@ void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev) void hns_roce_unregister_debugfs(struct hns_roce_dev *hr_dev) { debugfs_remove_recursive(hr_dev->dbgfs.root); + memset(&hr_dev->dbgfs, 0, sizeof(hr_dev->dbgfs)); } /* debugfs for hns module */ diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.h b/drivers/infiniband/hw/hns/hns_roce_debugfs.h index 7fff3aa98ad2..f5bdc1e99e0c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_debugfs.h +++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.h @@ -47,6 +47,7 @@ void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev); void hns_roce_unregister_debugfs(struct hns_roce_dev *hr_dev); void hns_roce_register_uctx_debugfs(struct hns_roce_dev *hr_dev, struct hns_roce_ucontext *uctx); -void hns_roce_unregister_uctx_debugfs(struct 
hns_roce_ucontext *uctx); +void hns_roce_unregister_uctx_debugfs(struct hns_roce_dev *hr_dev, + struct hns_roce_ucontext *uctx); #endif diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 187bf1bf1bfc..a77dc1d70a1c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -633,7 +633,7 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext) struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device); hns_roce_put_cq_bankid_for_uctx(context); - hns_roce_unregister_uctx_debugfs(context); + hns_roce_unregister_uctx_debugfs(hr_dev, context); mutex_lock(&hr_dev->uctx_list_mutex); list_del(&context->list); @@ -1494,9 +1494,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) void hns_roce_exit(struct hns_roce_dev *hr_dev, bool bond_cleanup) { + hns_roce_unregister_debugfs(hr_dev); hns_roce_unregister_device(hr_dev, bond_cleanup); hns_roce_dealloc_scc_param(hr_dev); - hns_roce_unregister_debugfs(hr_dev); hns_roce_free_dca_safe_buf(hr_dev); -- 2.33.0

反馈: 您发送到kernel@openeuler.org的补丁/补丁集,已成功转换为PR! PR链接地址: https://gitee.com/openeuler/kernel/pulls/15711 邮件列表地址:https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/NUK... FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/15711 Mailing list address: https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/NUK...
participants (2)
-
Junxian Huang
-
patchwork bot