From: Tang Yizhou <tangyizhou@huawei.com>
ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA

-------------------------------------------------

1. Fix the error handling of sp_free() when the given address was not
   allocated by sp_alloc().
2. When sp_alloc(..., SP_HUGEPAGE, ...) falls back to normal pages, we
   need to call vfs_fallocate(), otherwise the hugepage memory leaks
   until the sp group dies (see the fallocate() sketch below).
3. When sp_alloc(..., SP_HUGEPAGE, ...) falls back to normal pages, we
   need to clear the SP_HUGEPAGE bit in sp_flags so that the spa_stat
   interface correctly reports the spa as a normal-page spa.
4. Take the reference count of a spg inside find_or_alloc_sp_group(),
   where it is more closely tied to the group lookup (modelled in the
   sketch after the diff).
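For reference, the fallocate() semantics that fix #2 depends on can be
demonstrated from userspace: punching a hole with FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_KEEP_SIZE releases the backing pages of a range while keeping
the file size, which is what the added vfs_fallocate() call does for the
abandoned hugepage spa. The sketch below is only an illustration under
assumed values (the demo file path and the 2MB length are made up) and is
not part of the patch:

/* Minimal userspace sketch of the hole-punch semantics; not kernel code. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical file standing in for the share pool backing file */
	int fd = open("/tmp/sp_backing_demo", O_CREAT | O_RDWR, 0600);
	off_t offset = 0;
	off_t len = 2 * 1024 * 1024;	/* one 2MB "huge page" worth of data */

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* allocate the range first, as a populated spa would be */
	if (fallocate(fd, 0, offset, len))
		perror("fallocate(alloc)");

	/*
	 * Release the backing pages without shrinking the file; without this
	 * step the pages of the abandoned hugepage spa stay allocated until
	 * the sp group dies.
	 */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len))
		perror("fallocate(punch hole)");

	close(fd);
	return 0;
}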
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index d39c2c3d728c..a71f6fb214ce 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -358,7 +358,7 @@ static struct sp_group *find_or_alloc_sp_group(int spg_id)
 		spg->hugepage_failures = 0;
 		spg->dvpp_multi_spaces = false;
 		spg->owner = current->group_leader;
-		atomic_set(&spg->use_count, 0);
+		atomic_set(&spg->use_count, 1);
 		INIT_LIST_HEAD(&spg->procs);
 		INIT_LIST_HEAD(&spg->spa_list);
 
@@ -391,6 +391,10 @@ static struct sp_group *find_or_alloc_sp_group(int spg_id)
 			ret = PTR_ERR(spg->file_hugetlb);
 			goto out_fput;
 		}
+	} else {
+		if (!spg_valid(spg))
+			return ERR_PTR(-ENODEV);
+		atomic_inc(&spg->use_count);
 	}
 
 	return spg;
@@ -540,12 +544,6 @@ int sp_group_add_task(int pid, int spg_id)
 		goto out_put_task;
 	}
 
-	if (!spg_valid(spg)) {
-		ret = -ENODEV;
-		goto out_put_task;
-	}
-	atomic_inc(&spg->use_count);
-
 	/* access control permission check */
 	if (sysctl_ac_mode == AC_SINGLE_OWNER) {
 		if (spg->owner != current->group_leader) {
@@ -1102,6 +1100,7 @@ int sp_free(unsigned long addr)
 		if (printk_ratelimit())
 			pr_err("share pool: sp free failed, addr %pK is not from sp_alloc\n",
 			       (void *)addr);
+		goto drop_spa;
 	}
 
 	if (!spg_valid(spa->spg))
@@ -1312,31 +1311,32 @@ void *sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
 				__sp_free(spg, sp_addr, size_aligned,
 					  list_next_entry(mm, sp_node));
 
+				if (printk_ratelimit())
+					pr_warn("share pool: allocation failed due to mm populate failed"
+						"(potential no enough memory when -12): %d\n", ret);
+				p = ERR_PTR(ret);
+
+				mode = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
+				offset = sp_addr - MMAP_SHARE_POOL_START;
+
+				ret = vfs_fallocate(spa_file(spa), mode, offset, spa_size(spa));
+				if (ret)
+					pr_err("share pool: sp alloc normal page fallocate failed %d\n", ret);
+
 				if (file == spg->file_hugetlb) {
 					spg->hugepage_failures++;
 
 					/* fallback to small pages */
 					if (!(sp_flags & SP_HUGEPAGE_ONLY)) {
 						file = spg->file;
-						spa->is_hugepage = false;
 						size_aligned = ALIGN(size, PAGE_SIZE);
+						sp_flags &= ~SP_HUGEPAGE;
 						__sp_area_drop(spa);
 						mmput(mm);
 						goto try_again;
 					}
 				}
 
-				if (printk_ratelimit())
-					pr_warn("share pool: allocation failed due to mm populate failed"
-						"(potential no enough memory when -12): %d\n", ret);
-				p = ERR_PTR(ret);
-
-				mode = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
-				offset = sp_addr - MMAP_SHARE_POOL_START;
-				ret = vfs_fallocate(spa_file(spa), mode, offset, spa_size(spa));
-				if (ret)
-					pr_err("share pool: fallocate failed %d\n", ret);
-
 				mmput(mm);
 				break;
 			}
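As a side note on fix #4, the reference pattern after this change can be
modelled outside the kernel: find_or_alloc_sp_group() now returns with
exactly one reference held, whether it created the group (use_count
initialised to 1) or found an existing valid one (atomic_inc), so every
successful caller pairs with exactly one later put. The standalone sketch
below mirrors that pattern with simplified, made-up types and a plain
integer instead of atomic_t; it is not the kernel implementation:

/* Standalone model of the take-one-reference-per-successful-lookup rule. */
#include <stdio.h>
#include <stdlib.h>

struct group {
	int use_count;
	int valid;
};

static struct group *cached;	/* stands in for the idr lookup */

static struct group *find_or_alloc_group(void)
{
	struct group *g = cached;

	if (!g) {
		g = calloc(1, sizeof(*g));
		if (!g)
			return NULL;
		g->valid = 1;
		g->use_count = 1;	/* creator holds the first reference */
		cached = g;
	} else {
		if (!g->valid)
			return NULL;	/* kernel code returns ERR_PTR(-ENODEV) here */
		g->use_count++;		/* each additional user takes its own reference */
	}
	return g;
}

static void group_put(struct group *g)
{
	if (--g->use_count == 0) {
		if (cached == g)
			cached = NULL;
		free(g);
	}
}

int main(void)
{
	struct group *a = find_or_alloc_group();	/* creates the group, use_count = 1 */
	struct group *b = find_or_alloc_group();	/* finds it, use_count = 2 */

	if (!a || !b)
		return 1;

	printf("use_count after two lookups: %d\n", a->use_count);

	group_put(b);	/* use_count back to 1 */
	group_put(a);	/* use_count hits 0, group is freed */
	return 0;
}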