ascend inclusion
category: Feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8LNGH
---------------------------------------------
fork() creates a new mm for the new process. The new mm must not take over any share pool information from the parent process, so it needs to be cleaned (initialized).

exit() will mmput() the mm and free its memory. If the mm has already been added to an sp_group, the group membership must be cleaned up first.
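
The resulting call flow, roughly (a simplified sketch of the hunks below, not additional code in the patch):

  fork()
    ... -> mm_init(mm, ...)
      sp_init_mm(mm)        /* mm->sp_group_master = NULL */

  exit() -> last mmput(mm)
    __mmput(mm)
      exit_mmap(mm)
      sp_mm_clean(mm)       /* if share pool is enabled, __sp_mm_clean()
                               leaves every sp_group and frees the
                               sp_group_master */
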
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 include/linux/share_pool.h | 21 +++++++++++++-
 kernel/fork.c              |  3 ++
 mm/share_pool.c            | 59 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 82 insertions(+), 1 deletion(-)
diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index f3ce79f38689..f8f79234b27e 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -124,6 +124,13 @@ static inline bool sp_is_enabled(void)
 	return static_branch_likely(&share_pool_enabled_key);
 }
 
+extern void __sp_mm_clean(struct mm_struct *mm);
+static inline void sp_mm_clean(struct mm_struct *mm)
+{
+	if (sp_is_enabled())
+		__sp_mm_clean(mm);
+}
+
 static inline void sp_area_work_around(struct vm_unmapped_area_info *info)
 {
 	if (sp_is_enabled())
@@ -142,6 +149,11 @@ static inline bool sp_check_vm_share_pool(unsigned long vm_flags)
 	return sp_is_enabled() && (vm_flags & VM_SHARE_POOL);
 }
 
+static inline void sp_init_mm(struct mm_struct *mm)
+{
+	mm->sp_group_master = NULL;
+}
+
 #else /* CONFIG_SHARE_POOL */
 
 static inline int mg_sp_group_add_task(int tgid, unsigned long prot,
 					int spg_id)
@@ -185,10 +197,18 @@ static inline int mg_sp_id_of_current(void)
 	return -EPERM;
 }
 
+static inline void sp_mm_clean(struct mm_struct *mm)
+{
+}
+
 static inline void sp_area_drop(struct vm_area_struct *vma)
 {
 }
 
+static inline void sp_init_mm(struct mm_struct *mm)
+{
+}
+
 static inline int mg_sp_walk_page_range(unsigned long uva, unsigned long size,
 					struct task_struct *tsk,
 					struct sp_walk_data *sp_walk_data)
 {
@@ -222,7 +242,6 @@ static inline bool sp_check_vm_share_pool(unsigned long vm_flags)
 {
 	return false;
 }
-
 #endif /* !CONFIG_SHARE_POOL */
 
 #endif /* LINUX_SHARE_POOL_H */
diff --git a/kernel/fork.c b/kernel/fork.c
index 3b6d20dfb9a8..edbb16be9b39 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -99,6 +99,7 @@
 #include <linux/stackprotector.h>
 #include <linux/user_events.h>
 #include <linux/iommu.h>
+#include <linux/share_pool.h>
 
 #include <asm/pgalloc.h>
 #include <linux/uaccess.h>
@@ -1308,6 +1309,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 			     NR_MM_COUNTERS))
 		goto fail_pcpu;
 
+	sp_init_mm(mm);
 	mm->user_ns = get_user_ns(user_ns);
 	lru_gen_init_mm(mm);
 	return mm;
@@ -1347,6 +1349,7 @@ static inline void __mmput(struct mm_struct *mm)
 	ksm_exit(mm);
 	khugepaged_exit(mm); /* must run before exit_mmap */
 	exit_mmap(mm);
+	sp_mm_clean(mm);
 	mm_put_huge_zero_page(mm);
 	set_mm_exe_file(mm, NULL);
 	if (!list_empty(&mm->mmlist)) {
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 97c0abf14361..548b85e61989 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -3349,6 +3349,65 @@ static void __init proc_sharepool_init(void)
 /*** End of tatistical and maintenance functions ***/
 
+void __sp_mm_clean(struct mm_struct *mm)
+{
+	struct sp_meminfo *meminfo;
+	long alloc_size, k2u_size;
+	/* lockless visit */
+	struct sp_group_master *master = mm->sp_group_master;
+	struct sp_group_node *spg_node, *tmp;
+	struct sp_group *spg;
+
+	if (!master)
+		return;
+
+	/*
+	 * There are two basic scenarios when a process in the share pool is
+	 * exiting but its share pool memory usage is not 0.
+	 * 1. Process A called sp_alloc(), but it terminates without calling
+	 *    sp_free(). Then its share pool memory usage is a positive number.
+	 * 2. Process A never called sp_alloc(), and process B in the same spg
+	 *    called sp_alloc() to get an addr u. Then A gets u somehow and
+	 *    called sp_free(u). Now A's share pool memory usage is a negative
+	 *    number. Notice B's memory usage will be a positive number.
+	 *
+	 * We decide to print an info when seeing both of the scenarios.
+	 *
+	 * A process not in an sp group doesn't need to print because there
+	 * won't be any memory which is not freed.
+	 */
+	meminfo = &master->meminfo;
+	alloc_size = meminfo_alloc_sum(meminfo);
+	k2u_size = atomic64_read(&meminfo->k2u_size);
+	if (alloc_size != 0 || k2u_size != 0)
+		pr_info("process %s(%d) exits. It applied %ld aligned KB, k2u shared %ld aligned KB\n",
+			master->comm, master->tgid,
+			byte2kb(alloc_size), byte2kb(k2u_size));
+
+	down_write(&sp_global_sem);
+	list_for_each_entry_safe(spg_node, tmp, &master->group_head, group_node) {
+		spg = spg_node->spg;
+
+		down_write(&spg->rw_lock);
+
+		list_del(&spg_node->proc_node);
+		spg->proc_num--;
+		list_del(&spg_node->group_node);
+		master->group_num--;
+
+		up_write(&spg->rw_lock);
+
+		mmdrop(mm);
+		sp_group_put_locked(spg);
+		kfree(spg_node);
+	}
+	up_write(&sp_global_sem);
+
+	sp_del_group_master(master);
+
+	kfree(master);
+}
+
 DEFINE_STATIC_KEY_FALSE(share_pool_enabled_key);
 
 static int __init enable_share_pool(char *s)