driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IB2H4P
----------------------------------------------------------------------
The DCA mmap area is freed when the driver is reset or hot-unplugged. At
that point the VMA that maps it is still valid, which may lead to a page
use-after-free (PUAF).

Use rdma_user_mmap_io() to map this area instead, so that the VMA is
zapped when the driver is removed.
Fixes: d3caaebdbfe9 ("RDMA/hns: Optimize user DCA perfermance by sharing DCA status")
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com>
---
 drivers/infiniband/hw/hns/hns_roce_dca.c  |  3 +-
 drivers/infiniband/hw/hns/hns_roce_main.c | 39 ++++-------------------
 2 files changed, 9 insertions(+), 33 deletions(-)
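
Note (not part of the commit message): with this change the DCA case no
longer inserts the status pages itself; it falls through to the driver's
common rdma_user_mmap_io() call, and mappings created that way are tracked
per-ucontext by the RDMA core and torn down in its disassociate path on
reset or hot-unplug. A minimal sketch of that tail, assuming the mmap entry
carries the physical address stored by the dca.c hunk; the function name
and the "address" parameter are illustrative, not the driver's exact code:

/*
 * Sketch only, assumes <rdma/ib_verbs.h>: mapping the DCA status pages
 * through the RDMA core lets the core zap the VMA when the device is
 * reset or hot-unplugged, which closes the PUAF window.
 */
static int hns_roce_mmap_dca_sketch(struct ib_ucontext *uctx,
				    struct vm_area_struct *vma,
				    struct rdma_user_mmap_entry *rdma_entry,
				    u64 address, pgprot_t prot)
{
	int ret;

	ret = rdma_user_mmap_io(uctx, vma, address >> PAGE_SHIFT,
				rdma_entry->npages * PAGE_SIZE, prot,
				rdma_entry);
	rdma_user_mmap_entry_put(rdma_entry);
	return ret;
}
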
diff --git a/drivers/infiniband/hw/hns/hns_roce_dca.c b/drivers/infiniband/hw/hns/hns_roce_dca.c
index 49adf43261d6..4cef41591795 100644
--- a/drivers/infiniband/hw/hns/hns_roce_dca.c
+++ b/drivers/infiniband/hw/hns/hns_roce_dca.c
@@ -1132,7 +1132,8 @@ static void init_udca_status(struct hns_roce_ucontext *uctx, int udca_max_qps,
 		return;
 
 	ctx->dca_mmap_entry = hns_roce_user_mmap_entry_insert(ib_uctx,
-					(u64)kaddr, size, HNS_ROCE_MMAP_TYPE_DCA);
+					(u64)virt_to_phys(kaddr), size,
+					HNS_ROCE_MMAP_TYPE_DCA);
 	if (!ctx->dca_mmap_entry) {
 		free_pages_exact(kaddr, size);
 		return;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 1302c58c3937..85223d25995d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -644,36 +644,6 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 }
 
-static int mmap_dca(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
-	struct hns_roce_ucontext *uctx = to_hr_ucontext(context);
-	struct hns_roce_dca_ctx *ctx = &uctx->dca_ctx;
-	struct page **pages;
-	unsigned long num;
-	int ret;
-
-	if ((vma->vm_end - vma->vm_start != (ctx->status_npage * PAGE_SIZE) ||
-	    !(vma->vm_flags & VM_SHARED)))
-		return -EINVAL;
-
-	if (!(vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_EXEC))
-		return -EPERM;
-
-	if (!ctx->buf_status)
-		return -EOPNOTSUPP;
-
-	pages = kcalloc(ctx->status_npage, sizeof(struct page *), GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-
-	for (num = 0; num < ctx->status_npage; num++)
-		pages[num] = virt_to_page(ctx->buf_status + num * PAGE_SIZE);
-
-	ret = vm_insert_pages(vma, vma->vm_start, pages, &num);
-	kfree(pages);
-	return ret;
-}
-
 static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
@@ -703,8 +673,13 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
 		prot = pgprot_device(vma->vm_page_prot);
 		break;
 	case HNS_ROCE_MMAP_TYPE_DCA:
-		ret = mmap_dca(uctx, vma);
-		goto out;
+		if (!(vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_EXEC)) {
+			ret = -EPERM;
+			goto out;
+		}
+		vma->vm_flags |= VM_DONTEXPAND;
+		prot = vma->vm_page_prot;
+		break;
 	case HNS_ROCE_MMAP_TYPE_RESET:
 		if (vma->vm_flags & (VM_WRITE | VM_EXEC)) {
 			ret = -EINVAL;
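
Note: for completeness, this is how the remapped DCA status area is
expected to be consumed from userspace: mapped write+shared through the
uverbs device fd at the offset derived from the DCA mmap entry. A hedged
sketch; cmd_fd, status_size and dca_offset are illustrative names, and how
the offset reaches userspace is provider specific and not shown in this
patch:

#include <stdio.h>
#include <sys/mman.h>

/*
 * Sketch only: map the DCA status area writable and shared, matching
 * the VM_WRITE / !VM_EXEC checks enforced by hns_roce_mmap() above.
 */
static void *map_dca_status(int cmd_fd, size_t status_size, off_t dca_offset)
{
	void *status = mmap(NULL, status_size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, cmd_fd, dca_offset);

	if (status == MAP_FAILED) {
		perror("mmap DCA status");
		return NULL;
	}
	return status;
}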