From: Zhang Zekun zhangzekun11@huawei.com
Offering: HULK hulk inclusion category: performance bugzilla: https://gitee.com/openeuler/kernel/issues/I5XQS4 CVE: NA
-----------------------------------------------
Fix the following CodeCheck2.0 static check warning:
(1) Multi-branch statements of if, else/elseif, if there are multiple statements in any branch, all branches need to be braced.
Add braces in if, else/else-if statements in __sp_alloc_mmap_populate(), mg_sp_make_share_k2u(), sp_pmd_entry(), mg_sp_walk_page_range().
(2) Use parentheses to specify the sequence of expressions, instead of relying on the default operator precedence. Parentheses should be used when using bitwise operators.
Use parentheses to specify the sequence of expressions in sp_remap_kva_to_vma(), sp_node_id(), init_local_group().
(3) There must be 1 space between the block comment character and the comment content.
Add a space after the comment character.
(4) Do not add blank lines on the start of a code block defined by braces.
Remove the blank line in proc_usage_by_group().
Besides, change the name of __find_sp_area() to get_sp_area() to indicate that this function does not need to be called with the lock held, and to imply that this function will increase the use_count.
Signed-off-by: Zhang Zekun zhangzekun11@huawei.com --- mm/share_pool.c | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c index 51422fdb1485..6da780c28b1e 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -503,7 +503,7 @@ static int init_local_group(struct mm_struct *mm)
ret = local_group_add_task(mm, spg); if (ret < 0) - /* The spm would be released while destroying the spg*/ + /* The spm would be released while destroying the spg */ goto free_spg;
return 0; @@ -1703,7 +1703,7 @@ int mg_sp_id_of_current(void) if (!sp_is_enabled()) return -EOPNOTSUPP;
- if (current->flags & PF_KTHREAD || !current->mm) + if ((current->flags & PF_KTHREAD) || !current->mm) return -EINVAL;
down_read(&sp_group_sem); @@ -1937,7 +1937,7 @@ static struct sp_area *__find_sp_area_locked(struct sp_group *spg, return NULL; }
-static struct sp_area *__find_sp_area(struct sp_group *spg, unsigned long addr) +static struct sp_area *get_sp_area(struct sp_group *spg, unsigned long addr) { struct sp_area *n;
@@ -2143,7 +2143,7 @@ static int sp_free_get_spa(struct sp_free_context *fc)
fc->state = FREE_CONT;
- spa = __find_sp_area(spg, addr); + spa = get_sp_area(spg, addr); sp_group_drop(spg); if (!spa) { pr_debug("sp free invalid input addr %lx\n", addr); @@ -2232,7 +2232,7 @@ int mg_sp_free(unsigned long addr, int id) else sp_update_process_stat(current, false, fc.spa);
- __sp_area_drop(fc.spa); /* match __find_sp_area in sp_free_get_spa */ + __sp_area_drop(fc.spa); /* match get_sp_area in sp_free_get_spa */ out: return ret; } @@ -2517,8 +2517,9 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa, else pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n", ret); - } else + } else { ac->need_fallocate = true; + } return ret; }
@@ -2692,7 +2693,7 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa, goto put_mm; }
- if (kc && kc->sp_flags & SP_PROT_RO) + if (kc && (kc->sp_flags & SP_PROT_RO)) prot = PROT_READ;
ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma); @@ -2962,9 +2963,9 @@ void *mg_sp_make_share_k2u(unsigned long kva, unsigned long size, if (ret) return ERR_PTR(ret);
- if (kc.to_task) + if (kc.to_task) { uva = sp_make_share_kva_to_task(kc.kva_aligned, kc.size_aligned, kc.sp_flags); - else { + } else { struct sp_group *spg;
spg = __sp_find_spg(current->pid, kc.spg_id); @@ -2977,8 +2978,9 @@ void *mg_sp_make_share_k2u(unsigned long kva, unsigned long size, } uva = sp_make_share_kva_to_spg(kc.kva_aligned, kc.size_aligned, kc.sp_flags, spg); sp_group_drop(spg); - } else + } else { uva = ERR_PTR(-ENODEV); + } }
out: @@ -3002,8 +3004,9 @@ static int sp_pmd_entry(pmd_t *pmd, unsigned long addr, if (!sp_walk_data->is_page_type_set) { sp_walk_data->is_page_type_set = true; sp_walk_data->is_hugepage = true; - } else if (!sp_walk_data->is_hugepage) + } else if (!sp_walk_data->is_hugepage) { return -EFAULT; + }
/* To skip pte level walk */ walk->action = ACTION_CONTINUE; @@ -3329,9 +3332,9 @@ static int sp_unshare_uva(unsigned long uva, unsigned long size, int group_id) * at first we guess it's a hugepage addr * we can tolerate at most PMD_SIZE or PAGE_SIZE which is matched in k2u */ - spa = __find_sp_area(spg, ALIGN_DOWN(uva, PMD_SIZE)); + spa = get_sp_area(spg, ALIGN_DOWN(uva, PMD_SIZE)); if (!spa) { - spa = __find_sp_area(spg, ALIGN_DOWN(uva, PAGE_SIZE)); + spa = get_sp_area(spg, ALIGN_DOWN(uva, PAGE_SIZE)); if (!spa) { ret = -EINVAL; pr_debug("invalid input uva %lx in unshare uva\n", (unsigned long)uva); @@ -3571,9 +3574,9 @@ int mg_sp_walk_page_range(unsigned long uva, unsigned long size, }
down_write(&mm->mmap_lock); - if (likely(!mm->core_state)) + if (likely(!mm->core_state)) { ret = __sp_walk_page_range(uva, size, mm, sp_walk_data); - else { + } else { pr_err("walk page range: encoutered coredump\n"); ret = -ESRCH; } @@ -3723,7 +3726,7 @@ int sp_node_id(struct vm_area_struct *vma) if (!sp_is_enabled()) return node_id;
- if (vma && vma->vm_flags & VM_SHARE_POOL && vma->vm_private_data) { + if (vma && (vma->vm_flags & VM_SHARE_POOL) && vma->vm_private_data) { spa = vma->vm_private_data; node_id = spa->node_id; } @@ -4082,7 +4085,6 @@ static int proc_usage_by_group(int id, void *p, void *data)
down_read(&spg->rw_lock); list_for_each_entry(spg_node, &spg->procs, proc_node) { - master = spg_node->master; mm = master->mm; tgid = master->instat.tgid;