
driver inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBV8UW ---------------------------------------------------------------------- Previously the driver had to copy a new mtr and store it in mtr_node so that it could be found when freeing delayed-destruction resources, because the life cycle of the original mtr was over when QP/CQ/MR/SRQ structs were freed. But since the life cycle of the mtr has been decoupled, the driver doesn't need to copy the mtr now. Move mtr_node into the mtr struct so that the mtr can be found with no need to copy a new one. Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com> Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com> --- drivers/infiniband/hw/hns/hns_roce_cq.c | 15 ++---- drivers/infiniband/hw/hns/hns_roce_device.h | 14 +----- drivers/infiniband/hw/hns/hns_roce_mr.c | 54 ++++----------------- drivers/infiniband/hw/hns/hns_roce_qp.c | 17 ++----- drivers/infiniband/hw/hns/hns_roce_srq.c | 33 +++---------- 5 files changed, 24 insertions(+), 109 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 8aebdfd20baa..b0fdf073519c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -263,10 +263,6 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, struct hns_roce_buf_attr buf_attr = {}; int ret = 0; - hr_cq->mtr_node = kvmalloc(sizeof(*hr_cq->mtr_node), GFP_KERNEL); - if (!hr_cq->mtr_node) - return -ENOMEM; - buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT; buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size; buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num; @@ -278,8 +274,6 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, if (IS_ERR(hr_cq->mtr)) { ret = PTR_ERR(hr_cq->mtr); ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret); - kvfree(hr_cq->mtr_node); - hr_cq->mtr_node = NULL; } return ret; @@ -287,13 
+281,10 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) { - if (hr_cq->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(hr_cq->mtr_node, hr_dev, hr_cq->mtr); - } else { + if (hr_cq->delayed_destroy_flag) + hns_roce_add_unfree_mtr(hr_dev, hr_cq->mtr); + else hns_roce_mtr_destroy(hr_dev, hr_cq->mtr); - kvfree(hr_cq->mtr_node); - hr_cq->mtr_node = NULL; - } } static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 827dfad86855..f77d59958ef5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -368,6 +368,7 @@ struct hns_roce_mtr { struct ib_umem *umem; /* user space buffer */ struct hns_roce_buf *kmem; /* kernel space buffer */ struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */ + struct list_head node; /* list node for delay-destruction */ }; /* DCA config */ @@ -393,11 +394,6 @@ struct hns_roce_mw { u32 pbl_buf_pg_sz; }; -struct hns_roce_mtr_node { - struct hns_roce_mtr mtr; - struct list_head list; -}; - struct hns_roce_mr { struct ib_mr ibmr; u64 iova; /* MR's virtual original addr */ @@ -412,7 +408,6 @@ struct hns_roce_mr { u32 npages; dma_addr_t *page_list; bool delayed_destroy_flag; - struct hns_roce_mtr_node *mtr_node; }; struct hns_roce_mr_table { @@ -525,7 +520,6 @@ struct hns_roce_cq { int is_armed; /* cq is armed */ struct list_head node; /* all armed cqs are on a list */ bool delayed_destroy_flag; - struct hns_roce_mtr_node *mtr_node; }; struct hns_roce_idx_que { @@ -534,7 +528,6 @@ struct hns_roce_idx_que { unsigned long *bitmap; u32 head; u32 tail; - struct hns_roce_mtr_node *mtr_node; }; struct hns_roce_srq { @@ -561,7 +554,6 @@ struct hns_roce_srq { struct hns_roce_db rdb; u32 cap_flags; bool delayed_destroy_flag; - struct 
hns_roce_mtr_node *mtr_node; }; struct hns_roce_uar_table { @@ -742,7 +734,6 @@ struct hns_roce_qp { u8 priority; enum hns_roce_cong_type cong_type; bool delayed_destroy_flag; - struct hns_roce_mtr_node *mtr_node; spinlock_t flush_lock; struct hns_roce_dip *dip; }; @@ -1472,8 +1463,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, void hns_roce_add_unfree_umem(struct hns_roce_user_db_page *user_page, struct hns_roce_dev *hr_dev); void hns_roce_free_unfree_umem(struct hns_roce_dev *hr_dev); -void hns_roce_add_unfree_mtr(struct hns_roce_mtr_node *pos, - struct hns_roce_dev *hr_dev, +void hns_roce_add_unfree_mtr(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr); void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev); int hns_roce_alloc_scc_param(struct hns_roce_dev *hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 7bf6a8481004..737a7c28acab 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -99,10 +99,6 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, struct hns_roce_buf_attr buf_attr = {}; int err = 0; - mr->mtr_node = kvmalloc(sizeof(*mr->mtr_node), GFP_KERNEL); - if (!mr->mtr_node) - return -ENOMEM; - mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num; buf_attr.page_shift = is_fast ? 
PAGE_SHIFT : hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT; @@ -122,8 +118,6 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, if (IS_ERR(mr->pbl_mtr)) { err = PTR_ERR(mr->pbl_mtr); ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err); - kvfree(mr->mtr_node); - mr->mtr_node = NULL; return err; } @@ -135,13 +129,10 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) { - if (mr->delayed_destroy_flag && mr->type != MR_TYPE_DMA) { - hns_roce_add_unfree_mtr(mr->mtr_node, hr_dev, mr->pbl_mtr); - } else { + if (mr->delayed_destroy_flag && mr->type != MR_TYPE_DMA) + hns_roce_add_unfree_mtr(hr_dev, mr->pbl_mtr); + else hns_roce_mtr_destroy(hr_dev, mr->pbl_mtr); - kvfree(mr->mtr_node); - mr->mtr_node = NULL; - } } static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) @@ -1235,49 +1226,22 @@ void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) kvfree(mtr); } -static void hns_roce_copy_mtr(struct hns_roce_mtr *new_mtr, struct hns_roce_mtr *old_mtr) -{ - struct list_head *new_head, *old_head; - int i, j; - - memcpy(new_mtr, old_mtr, sizeof(*old_mtr)); - - for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++) - for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) { - new_head = &new_mtr->hem_list.mid_bt[i][j]; - old_head = &old_mtr->hem_list.mid_bt[i][j]; - list_replace(old_head, new_head); - } - - new_head = &new_mtr->hem_list.root_bt; - old_head = &old_mtr->hem_list.root_bt; - list_replace(old_head, new_head); - - new_head = &new_mtr->hem_list.btm_bt; - old_head = &old_mtr->hem_list.btm_bt; - list_replace(old_head, new_head); -} - -void hns_roce_add_unfree_mtr(struct hns_roce_mtr_node *pos, - struct hns_roce_dev *hr_dev, +void hns_roce_add_unfree_mtr(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) { - hns_roce_copy_mtr(&pos->mtr, mtr); - mutex_lock(&hr_dev->mtr_unfree_list_mutex); - 
list_add_tail(&pos->list, &hr_dev->mtr_unfree_list); + list_add_tail(&mtr->node, &hr_dev->mtr_unfree_list); mutex_unlock(&hr_dev->mtr_unfree_list_mutex); } void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev) { - struct hns_roce_mtr_node *pos, *next; + struct hns_roce_mtr *mtr, *next; mutex_lock(&hr_dev->mtr_unfree_list_mutex); - list_for_each_entry_safe(pos, next, &hr_dev->mtr_unfree_list, list) { - list_del(&pos->list); - hns_roce_mtr_destroy(hr_dev, &pos->mtr); - kvfree(pos); + list_for_each_entry_safe(mtr, next, &hr_dev->mtr_unfree_list, node) { + list_del(&mtr->node); + hns_roce_mtr_destroy(hr_dev, mtr); } mutex_unlock(&hr_dev->mtr_unfree_list_mutex); } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 81cd20924a67..828bf9e59e27 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -837,18 +837,12 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_device *ibdev = &hr_dev->ib_dev; int ret = 0; - hr_qp->mtr_node = kvmalloc(sizeof(*hr_qp->mtr_node), GFP_KERNEL); - if (!hr_qp->mtr_node) - return -ENOMEM; - if (dca_en) { /* DCA must be enabled after the buffer attr is configured. 
*/ ret = hns_roce_enable_dca(hr_dev, hr_qp, udata); if (ret) { ibdev_err(ibdev, "failed to enable DCA, ret = %d.\n", ret); - kvfree(hr_qp->mtr_node); - hr_qp->mtr_node = NULL; return ret; } @@ -870,8 +864,6 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret); if (dca_en) hns_roce_disable_dca(hr_dev, hr_qp, udata); - kvfree(hr_qp->mtr_node); - hr_qp->mtr_node = NULL; } else if (dca_en) { ret = hns_roce_map_dca_safe_page(hr_dev, hr_qp); } @@ -882,13 +874,10 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, static void free_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_udata *udata) { - if (hr_qp->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(hr_qp->mtr_node, hr_dev, hr_qp->mtr); - } else { + if (hr_qp->delayed_destroy_flag) + hns_roce_add_unfree_mtr(hr_dev, hr_qp->mtr); + else hns_roce_mtr_destroy(hr_dev, hr_qp->mtr); - kvfree(hr_qp->mtr_node); - hr_qp->mtr_node = NULL; - } if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DYNAMIC_CTX_ATTACH) hns_roce_disable_dca(hr_dev, hr_qp, udata); diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index c4161e76ef07..965ed2d682ad 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -174,10 +174,6 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, struct hns_roce_buf_attr buf_attr = {}; int ret = 0; - idx_que->mtr_node = kvmalloc(sizeof(*idx_que->mtr_node), GFP_KERNEL); - if (!idx_que->mtr_node) - return -ENOMEM; - srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ); buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_SHIFT; @@ -193,7 +189,7 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, ret = PTR_ERR(idx_que->mtr); ibdev_err(ibdev, "failed to alloc SRQ idx mtr, ret = %d.\n", ret); - goto err_kvmalloc; + 
return ret; } if (!udata) { @@ -211,9 +207,6 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, return 0; err_idx_mtr: hns_roce_mtr_destroy(hr_dev, idx_que->mtr); -err_kvmalloc: - kvfree(idx_que->mtr_node); - idx_que->mtr_node = NULL; return ret; } @@ -224,13 +217,10 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) bitmap_free(idx_que->bitmap); idx_que->bitmap = NULL; - if (srq->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(idx_que->mtr_node, hr_dev, idx_que->mtr); - } else { + if (srq->delayed_destroy_flag) + hns_roce_add_unfree_mtr(hr_dev, idx_que->mtr); + else hns_roce_mtr_destroy(hr_dev, idx_que->mtr); - kvfree(idx_que->mtr_node); - idx_que->mtr_node = NULL; - } } static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, @@ -241,10 +231,6 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_buf_attr buf_attr = {}; int ret = 0; - srq->mtr_node = kvmalloc(sizeof(*srq->mtr_node), GFP_KERNEL); - if (!srq->mtr_node) - return -ENOMEM; - srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE, HNS_ROCE_SGE_SIZE * srq->max_gs))); @@ -262,8 +248,6 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, ret = PTR_ERR(srq->buf_mtr); ibdev_err(ibdev, "failed to alloc SRQ buf mtr, ret = %d.\n", ret); - kvfree(srq->mtr_node); - srq->mtr_node = NULL; } return ret; @@ -272,13 +256,10 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) { - if (srq->delayed_destroy_flag) { - hns_roce_add_unfree_mtr(srq->mtr_node, hr_dev, srq->buf_mtr); - } else { + if (srq->delayed_destroy_flag) + hns_roce_add_unfree_mtr(hr_dev, srq->buf_mtr); + else hns_roce_mtr_destroy(hr_dev, srq->buf_mtr); - kvfree(srq->mtr_node); - srq->mtr_node = NULL; - } } static int alloc_srq_wrid(struct hns_roce_srq *srq) -- 2.33.0