From: Tang Yizhou <tangyizhou@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI
CVE: NA
-------------------------------------------------
Refactor sp_k2u to improve its readability:
1. Introduce struct sp_k2u_context to save mapping parameters.
2. Extract sp_k2u_prepare to check input parameters of sp_k2u and
   initialize the sp_k2u_context instance.
3. Extract sp_k2u_finish.
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 110 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 71 insertions(+), 39 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c index 716b3ea7c960d..824163e764e63 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -2574,42 +2574,30 @@ static int is_k2task(int spg_id) return (spg_id == SPG_ID_DEFAULT || spg_id == SPG_ID_NONE) ? 1 : 0; }
-/** - * sp_make_share_k2u() - Share kernel memory to current process or an sp_group. - * @kva: the VA of shared kernel memory. - * @size: the size of shared kernel memory. - * @sp_flags: how to allocate the memory. We only support SP_DVPP. - * @pid: the pid of the specified process (Not currently in use). - * @spg_id: the share group that the memory is shared to. - * - * Return: the shared target user address to start at - * - * Share kernel memory to current task if spg_id == SPG_ID_NONE - * or SPG_ID_DEFAULT in multi-group mode. - * - * Return: - * * if succeed, return the shared user address to start at. - * * if fail, return the pointer of -errno. - */ -void *sp_make_share_k2u(unsigned long kva, unsigned long size, - unsigned long sp_flags, int pid, int spg_id) -{ - void *uva; +struct sp_k2u_context { + unsigned long kva; unsigned long kva_aligned; + unsigned long size; unsigned long size_aligned; - unsigned int page_size = PAGE_SIZE; - int is_hugepage, to_task; + unsigned long sp_flags; + int spg_id; +};
- check_interrupt_context(); +static int sp_k2u_prepare(unsigned long kva, unsigned long size, + unsigned long sp_flags, int spg_id, struct sp_k2u_context *kc) +{ + int is_hugepage; + unsigned int page_size = PAGE_SIZE; + unsigned long kva_aligned, size_aligned;
if (sp_flags & ~SP_DVPP) { pr_err_ratelimited("k2u sp_flags %lx error\n", sp_flags); - return ERR_PTR(-EINVAL); + return -EINVAL; }
if (!current->mm) { pr_err_ratelimited("k2u: kthread is not allowed\n"); - return ERR_PTR(-EPERM); + return -EPERM; }
is_hugepage = is_vmap_hugepage(kva); @@ -2620,7 +2608,7 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, /* do nothing */ } else { pr_err_ratelimited("k2u kva is not vmalloc address\n"); - return ERR_PTR(is_hugepage); + return is_hugepage; }
/* aligned down kva is convenient for caller to start with any valid kva */ @@ -2629,31 +2617,75 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
if (!vmalloc_area_set_flag(kva_aligned, VM_SHAREPOOL)) { pr_debug("k2u_task kva %lx is not valid\n", kva_aligned); - return ERR_PTR(-EINVAL); + return -EINVAL; }
- to_task = is_k2task(spg_id); + kc->kva = kva; + kc->kva_aligned = kva_aligned; + kc->size = size; + kc->size_aligned = size_aligned; + kc->sp_flags = sp_flags; + kc->spg_id = spg_id; + return 0; +} + +static void *sp_k2u_finish(void *uva, struct sp_k2u_context *kc) +{ + if (IS_ERR(uva)) + vmalloc_area_clr_flag(kc->kva_aligned, VM_SHAREPOOL); + else + uva = uva + (kc->kva - kc->kva_aligned); + + sp_dump_stack(); + return uva; +} + +/** + * sp_make_share_k2u() - Share kernel memory to current process or an sp_group. + * @kva: the VA of shared kernel memory. + * @size: the size of shared kernel memory. + * @sp_flags: how to allocate the memory. We only support SP_DVPP. + * @pid: the pid of the specified process (Not currently in use). + * @spg_id: the share group that the memory is shared to. + * + * Return: the shared target user address to start at + * + * Share kernel memory to current task if spg_id == SPG_ID_NONE + * or SPG_ID_DEFAULT in multi-group mode. + * + * Return: + * * if succeed, return the shared user address to start at. + * * if fail, return the pointer of -errno. + */ +void *sp_make_share_k2u(unsigned long kva, unsigned long size, + unsigned long sp_flags, int pid, int spg_id) +{ + void *uva; + int ret, to_task; + struct sp_k2u_context kc; + + check_interrupt_context(); + + ret = sp_k2u_prepare(kva, size, sp_flags, spg_id, &kc); + if (ret) + return ERR_PTR(ret); + + to_task = is_k2task(kc.spg_id); if (to_task == 1) - uva = sp_make_share_kva_to_task(kva_aligned, size_aligned, sp_flags); + uva = sp_make_share_kva_to_task(kc.kva_aligned, kc.size_aligned, kc.sp_flags); else if (to_task == 0) { struct sp_group *spg;
- spg = __sp_find_spg(current->pid, spg_id); + spg = __sp_find_spg(current->pid, kc.spg_id); if (spg) { - uva = sp_make_share_kva_to_spg(kva_aligned, size_aligned, sp_flags, spg); + uva = sp_make_share_kva_to_spg(kc.kva_aligned, kc.size_aligned, kc.sp_flags, spg); sp_group_drop(spg); } else uva = ERR_PTR(-ENODEV); } else uva = ERR_PTR(to_task);
- if (IS_ERR(uva)) - vmalloc_area_clr_flag(kva_aligned, VM_SHAREPOOL); - else - uva = uva + (kva - kva_aligned); - - sp_dump_stack(); - return uva; + return sp_k2u_finish(uva, &kc); } EXPORT_SYMBOL_GPL(sp_make_share_k2u);