From: Ding Tianhong dingtianhong@huawei.com
ascend inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI CVE: NA
---------------------------------------------------
When mmput is called concurrently, the check of "mm_users == 2" in sp_group_exit is not atomic with the atomic_dec_and_test in mmput. As a result, the condition "mm_users == 2" may never evaluate to true, and the mm is leaked.
For example, in a typical scenario, a process has two threads, and an mmget is performed in sp_group_add_task, so mm_users is 3. When the two threads exit at the same time, the check of "mm_users == 2" fails in both of them.
Therefore, both the check and the atomic_dec_and_test are performed under the spg rw_lock to ensure that the whole sequence is serialized.
Signed-off-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Zhou Guanghui zhouguanghui1@huawei.com Reviewed-by: Weilong Chen chenweilong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- include/linux/share_pool.h | 5 +++-- kernel/fork.c | 3 ++- mm/share_pool.c | 25 ++++++++++++++++++++----- 3 files changed, 25 insertions(+), 8 deletions(-)
diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h index b0b2750e7bbe1..c03b83beaf63c 100644 --- a/include/linux/share_pool.h +++ b/include/linux/share_pool.h @@ -156,7 +156,7 @@ static inline void sp_init_mm(struct mm_struct *mm) }
extern int sp_group_add_task(int pid, int spg_id); -extern void sp_group_exit(struct mm_struct *mm); +extern int sp_group_exit(struct mm_struct *mm); extern void sp_group_post_exit(struct mm_struct *mm); extern int sp_group_id_by_pid(int pid); extern int sp_group_walk(int spg_id, void *data, int (*func)(struct mm_struct *mm, void *)); @@ -299,8 +299,9 @@ static inline int sp_group_add_task(int pid, int spg_id) return -EPERM; }
-static inline void sp_group_exit(struct mm_struct *mm) +static inline int sp_group_exit(struct mm_struct *mm) { + return 0; }
static inline void sp_group_post_exit(struct mm_struct *mm) diff --git a/kernel/fork.c b/kernel/fork.c index 22ed43ed527de..d091b680e5e62 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1086,7 +1086,8 @@ void mmput(struct mm_struct *mm) { might_sleep();
- sp_group_exit(mm); + if (sp_group_exit(mm)) + return;
if (atomic_dec_and_test(&mm->mm_users)) __mmput(mm); diff --git a/mm/share_pool.c b/mm/share_pool.c index 90930e4a8dfe4..61bbbd772c847 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -3110,14 +3110,20 @@ EXPORT_SYMBOL(sharepool_no_page);
#define MM_WOULD_FREE 2
-void sp_group_exit(struct mm_struct *mm) +int sp_group_exit(struct mm_struct *mm) { struct sp_group *spg = mm->sp_group; bool is_alive = true;
if (!spg || !enable_ascend_share_pool) - return; + return 0;
+ /* + * The judgment of mm->mm_users == MM_WOULD_FREE and atomic_dec_and_test + * must be atomic. Otherwise, mm->mm_users == MM_WOULD_FREE may never be + * true due to the gap in the middle. + */ + down_write(&spg->rw_lock); /* * Recall we add mm->users by 1 deliberately in sp_group_add_task(). * If the mm_users is 2, it means that the mm is ready to be freed @@ -3125,21 +3131,30 @@ void sp_group_exit(struct mm_struct *mm) * do_exit() -> exit_mm() -> mmput() -> THIS function. */ if (atomic_read(&mm->mm_users) == MM_WOULD_FREE) { - down_write(&spg->rw_lock); /* a dead group should NOT be reactive again */ if (spg_valid(spg) && list_is_singular(&spg->procs)) is_alive = spg->is_alive = false; if (mm->sp_group) /* concurrency handle of sp_group_add_task */ list_del(&mm->sp_node); /* affect spg->procs */ + /* match with get_task_mm() in sp_group_add_task() */ + atomic_dec(&mm->mm_users); up_write(&spg->rw_lock);
if (!is_alive) blocking_notifier_call_chain(&sp_notifier_chain, 0, mm->sp_group);
- /* match with get_task_mm() in sp_group_add_task() */ - atomic_dec(&mm->mm_users); + return 0; } + + if (atomic_dec_and_test(&mm->mm_users)) { + up_write(&spg->rw_lock); + WARN(1, "Invalid user counting\n"); + return 0; + } + + up_write(&spg->rw_lock); + return 1; }
void sp_group_post_exit(struct mm_struct *mm)