From: Guo Mengqi <guomengqi3@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5QETC
CVE: NA
--------------------------------
sp_make_share_k2u only supports vmalloc addresses now, so delete the fallback handling for non-vmalloc addresses.
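
For reference, a sketch of __sp_remap_get_pfn() as it should read after this change, reconstructed from the hunk below; a non-vmalloc kernel address is no longer translated with virt_to_pfn() and leaves pfn at -EINVAL:

static unsigned long __sp_remap_get_pfn(unsigned long kva)
{
	unsigned long pfn = -EINVAL;

	/* sp_make_share_k2u only support vmalloc address */
	if (is_vmalloc_addr((void *)kva))
		pfn = vmalloc_to_pfn((void *)kva);

	/* non-vmalloc addresses fall through with pfn still -EINVAL */
	return pfn;
}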
Signed-off-by: Guo Mengqi <guomengqi3@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 mm/share_pool.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index bd021da5a930..d49c066d1229 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -2860,11 +2860,11 @@ static int is_vmap_hugepage(unsigned long addr)
 
 static unsigned long __sp_remap_get_pfn(unsigned long kva)
 {
-	unsigned long pfn;
+	unsigned long pfn = -EINVAL;
+
+	/* sp_make_share_k2u only support vmalloc address */
 	if (is_vmalloc_addr((void *)kva))
 		pfn = vmalloc_to_pfn((void *)kva);
-	else
-		pfn = virt_to_pfn(kva);
 
 	return pfn;
 }
From: Guo Mengqi <guomengqi3@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5QQPG
CVE: NA
--------------------------------
Add a check for size == 0 in mg_sp_make_share_k2u() to avoid passing a 0-size spa to __insert_sp_area().
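
With this change, the early parameter checks in sp_k2u_prepare() read roughly as follows (reconstructed from the hunk below; the closing brace of the sp_flags check lies outside the hunk), so a zero-size request is rejected before any spa is created:

	trace_sp_k2u_begin(kc);

	/* reject a zero-size request before an spa is allocated */
	if (!size) {
		pr_err_ratelimited("k2u input size is 0.\n");
		return -EINVAL;
	}

	if (sp_flags & ~SP_FLAG_MASK) {
		pr_err_ratelimited("k2u sp_flags %lx error\n", sp_flags);
		return -EINVAL;
	}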
Signed-off-by: Guo Mengqi <guomengqi3@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 mm/share_pool.c | 5 +++++
 1 file changed, 5 insertions(+)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index d49c066d1229..fb1722e8fbf1 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -3097,6 +3097,11 @@ static int sp_k2u_prepare(unsigned long kva, unsigned long size,
 
 	trace_sp_k2u_begin(kc);
 
+	if (!size) {
+		pr_err_ratelimited("k2u input size is 0.\n");
+		return -EINVAL;
+	}
+
 	if (sp_flags & ~SP_FLAG_MASK) {
 		pr_err_ratelimited("k2u sp_flags %lx error\n", sp_flags);
 		return -EINVAL;
From: Guo Mengqi <guomengqi3@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5R0X9
CVE: NA
--------------------------------
Fix an AA deadlock caused by nested locking of sp_group_sem in mg_sp_group_add_task().
Deadlock path:
mg_sp_group_add_task()
  down_write(sp_group_sem)
  find_or_alloc_sp_group()
    !spg_valid()
    sp_group_drop()
      free_sp_group()
        -> down_write(sp_group_sem)   ---> AA deadlock
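
The fix drops the reference through a _locked variant on this path so that the final put does not re-acquire sp_group_sem. A minimal sketch of the two drop paths, assuming free_sp_group_locked() (called in the hunk below) tears the group down with sp_group_sem already held:

/* Caller already holds sp_group_sem for write, e.g. mg_sp_group_add_task(). */
static void sp_group_drop_locked(struct sp_group *spg)
{
	lockdep_assert_held_exclusive(&sp_group_sem);

	/* last reference: free without taking sp_group_sem a second time */
	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group_locked(spg);
}

/* Unlocked path is unchanged: free_sp_group() takes sp_group_sem itself. */
static void sp_group_drop(struct sp_group *spg)
{
	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group(spg);
}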
Signed-off-by: Guo Mengqi <guomengqi3@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 mm/share_pool.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index fb1722e8fbf1..41ed3a3b8682 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -946,6 +946,14 @@ static void free_sp_group(struct sp_group *spg)
 	up_write(&sp_group_sem);
 }
 
+static void sp_group_drop_locked(struct sp_group *spg)
+{
+	lockdep_assert_held_exclusive(&sp_group_sem);
+
+	if (atomic_dec_and_test(&spg->use_count))
+		free_sp_group_locked(spg);
+}
+
 static void sp_group_drop(struct sp_group *spg)
 {
 	if (atomic_dec_and_test(&spg->use_count))
@@ -1234,7 +1242,7 @@ static struct sp_group *find_or_alloc_sp_group(int spg_id, unsigned long flag)
 	down_read(&spg->rw_lock);
 	if (!spg_valid(spg)) {
 		up_read(&spg->rw_lock);
-		sp_group_drop(spg);
+		sp_group_drop_locked(spg);
 		return ERR_PTR(-ENODEV);
 	}
 	up_read(&spg->rw_lock);