From: Wang Wensheng <wangwensheng4@huawei.com>
ascend inclusion
category: Feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4NDAW
CVE: NA
-------------------
This is the reverse of sp_make_share_u2k: it unmaps the vmalloc area that was shared to kernel space by u2k.
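
A minimal usage sketch of the intended pairing (illustration only, not
part of this patch; the uva/size/pid values, the IS_ERR() convention
and the SPG_ID_DEFAULT argument are assumptions):

	/* hypothetical caller: share a user buffer into kernel space,
	 * use it, then undo the mapping with sp_unshare()
	 */
	void *kva = mg_sp_make_share_u2k(uva, size, pid);

	if (!IS_ERR(kva)) {
		/* ... access the user buffer through kva ... */
		sp_unshare((unsigned long)kva, size, pid, SPG_ID_DEFAULT);
	}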
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Signed-off-by: Peng Wu <wupeng58@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 mm/share_pool.c | 97 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 96 insertions(+), 1 deletion(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 4d45a2519b10..8e357056110e 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -1120,6 +1120,36 @@ void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
 }
 EXPORT_SYMBOL_GPL(mg_sp_alloc);
 
+/**
+ * is_vmap_hugepage() - Check if a kernel address belongs to vmalloc family.
+ * @addr: the kernel space address to be checked.
+ *
+ * Return:
+ * * >0		- a vmalloc hugepage addr.
+ * * =0		- a normal vmalloc addr.
+ * * -errno	- failure.
+ */
+static int is_vmap_hugepage(unsigned long addr)
+{
+	struct vm_struct *area;
+
+	if (unlikely(!addr)) {
+		pr_err_ratelimited("null vmap addr pointer\n");
+		return -EINVAL;
+	}
+
+	area = find_vm_area((void *)addr);
+	if (unlikely(!area)) {
+		pr_debug("can't find vm area(%lx)\n", addr);
+		return -EINVAL;
+	}
+
+	if (area->flags & VM_HUGE_PAGES)
+		return 1;
+	else
+		return 0;
+}
+
 /**
  * sp_make_share_k2u() - Share kernel memory to current process or an sp_group.
  * @kva: the VA of shared kernel memory.
@@ -1422,6 +1452,55 @@ void *mg_sp_make_share_u2k(unsigned long uva, unsigned long size, int pid)
 }
 EXPORT_SYMBOL_GPL(mg_sp_make_share_u2k);
 
+static int sp_unshare_uva(unsigned long uva, unsigned long size)
+{
+	return 0;
+}
+
+/* No possible concurrent protection, take care when use */
+static int sp_unshare_kva(unsigned long kva, unsigned long size)
+{
+	unsigned long addr, kva_aligned;
+	struct page *page;
+	unsigned long size_aligned;
+	unsigned long step;
+	bool is_hugepage = true;
+	int ret;
+
+	ret = is_vmap_hugepage(kva);
+	if (ret > 0) {
+		kva_aligned = ALIGN_DOWN(kva, PMD_SIZE);
+		size_aligned = ALIGN(kva + size, PMD_SIZE) - kva_aligned;
+		step = PMD_SIZE;
+	} else if (ret == 0) {
+		kva_aligned = ALIGN_DOWN(kva, PAGE_SIZE);
+		size_aligned = ALIGN(kva + size, PAGE_SIZE) - kva_aligned;
+		step = PAGE_SIZE;
+		is_hugepage = false;
+	} else {
+		pr_err_ratelimited("check vmap hugepage failed %d\n", ret);
+		return -EINVAL;
+	}
+
+	if (kva_aligned + size_aligned < kva_aligned) {
+		pr_err_ratelimited("overflow happened in unshare kva\n");
+		return -EINVAL;
+	}
+
+	for (addr = kva_aligned; addr < (kva_aligned + size_aligned); addr += step) {
+		page = vmalloc_to_page((void *)addr);
+		if (page)
+			put_page(page);
+		else
+			WARN(1, "vmalloc %pK to page/hugepage failed\n",
+			     (void *)addr);
+	}
+
+	vunmap((void *)kva_aligned);
+
+	return 0;
+}
+
 /**
  * sp_unshare() - Unshare the kernel or user memory which shared by calling
  * sp_make_share_{k2u,u2k}().
@@ -1434,7 +1513,23 @@ EXPORT_SYMBOL_GPL(mg_sp_make_share_u2k);
  */
 int sp_unshare(unsigned long va, unsigned long size, int pid, int spg_id)
 {
-	return 0;
+	int ret = 0;
+
+	check_interrupt_context();
+
+	if (va < TASK_SIZE) {
+		/* user address */
+		ret = sp_unshare_uva(va, size);
+	} else if (va >= PAGE_OFFSET) {
+		/* kernel address */
+		ret = sp_unshare_kva(va, size);
+	} else {
+		/* regard user and kernel address ranges as bad address */
+		pr_debug("unshare addr %lx is not a user or kernel addr\n", (unsigned long)va);
+		ret = -EFAULT;
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(sp_unshare);
 
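
For reference, a standalone sketch of the hugepage alignment arithmetic
in sp_unshare_kva() above (userspace illustration only; the 2 MiB
PMD_SIZE, the example kva/size values and the ALIGN macros mirroring
the kernel helpers are assumptions):

	#include <stdio.h>

	#define PMD_SIZE		(2UL << 20)	/* assumed 2 MiB hugepage */
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

	int main(void)
	{
		/* hypothetical shared range straddling a hugepage boundary */
		unsigned long kva = 0xffff800010300123UL, size = 0x5000;
		unsigned long kva_aligned = ALIGN_DOWN(kva, PMD_SIZE);
		unsigned long size_aligned = ALIGN(kva + size, PMD_SIZE) - kva_aligned;

		/* sp_unshare_kva() walks this widened range PMD_SIZE at a time */
		printf("release [%#lx, %#lx): %lu hugepage(s)\n",
		       kva_aligned, kva_aligned + size_aligned,
		       size_aligned / PMD_SIZE);
		return 0;
	}

With a normal vmalloc mapping the same arithmetic applies with
PAGE_SIZE as the alignment and loop step.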