From: Peter Zijlstra <peterz@infradead.org>
mainline inclusion
from mainline-v6.2-rc1
commit af80602799681c78f14fbe20b6185a56020dedee
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7XLNT
CVE: CVE-2022-40982
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
---------------------------
commit af80602799681c78f14fbe20b6185a56020dedee upstream.
In order to allow using mm_alloc() much earlier, move initializing mm_cachep into mm_init().
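
For illustration only (not part of this patch): once mm_cache_init() has run
from mm_init(), early boot code that executes after mm_init() but still before
proc_caches_init() can allocate a struct mm_struct, roughly like the
hypothetical caller sketched below.

	/* Hypothetical early-boot caller; mm_alloc() is declared in
	 * <linux/sched/mm.h> and only works once mm_cachep exists. */
	struct mm_struct *mm = mm_alloc();

	if (!mm)
		panic("early mm_alloc() failed");
	/* ... set up and use mm; release it later with mmput(mm) ... */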
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20221025201057.751153381@infradead.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

conflicts:
	kernel/fork.c

Signed-off-by: Zeng Heng <zengheng4@huawei.com>
---
 include/linux/sched/task.h |  1 +
 init/main.c                |  1 +
 kernel/fork.c              | 32 ++++++++++++++++++--------------
 3 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index bc76171fa6d7..41757b291204 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -62,6 +62,7 @@ extern void sched_dead(struct task_struct *p);
 
 void __noreturn do_task_dead(void);
 
+extern void mm_cache_init(void);
 extern void proc_caches_init(void);
 
 extern void fork_init(void);
diff --git a/init/main.c b/init/main.c
index 8cccd0687a93..342b6226ac4e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -841,6 +841,7 @@ static void __init mm_init(void)
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
 	pti_init();
+	mm_cache_init();
 }
 
 #ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
diff --git a/kernel/fork.c b/kernel/fork.c
index d50609760c26..40c96a559a9f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2923,10 +2923,27 @@ static void sighand_ctor(void *data)
 	init_waitqueue_head(&sighand->signalfd_wqh);
 }
 
-void __init proc_caches_init(void)
+void __init mm_cache_init(void)
 {
 	unsigned int mm_size;
 
+	/*
+	 * The mm_cpumask is located at the end of mm_struct, and is
+	 * dynamically sized based on the maximum CPU number this system
+	 * can have, taking hotplug into account (nr_cpu_ids).
+	 */
+	mm_size = MM_STRUCT_SIZE;
+
+	mm_cachep = kmem_cache_create_usercopy("mm_struct",
+			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+			OFFSET_OF_MM_SAVED_AUXV,
+			SIZE_OF_MM_SAVED_AUXV,
+			NULL);
+}
+
+void __init proc_caches_init(void)
+{
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -2944,19 +2961,6 @@ void __init proc_caches_init(void)
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
 			NULL);
 
-	/*
-	 * The mm_cpumask is located at the end of mm_struct, and is
-	 * dynamically sized based on the maximum CPU number this system
-	 * can have, taking hotplug into account (nr_cpu_ids).
-	 */
-	mm_size = MM_STRUCT_SIZE;
-
-	mm_cachep = kmem_cache_create_usercopy("mm_struct",
-			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
-			OFFSET_OF_MM_SAVED_AUXV,
-			SIZE_OF_MM_SAVED_AUXV,
-			NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
 	mmap_init();
 	nsproxy_cache_init();