From: Kefeng Wang wangkefeng.wang@huawei.com
mainline inclusion from mainline-v5.16-rc1 commit 3252b1d8309ea42bc6329d9341072ecf1c9505c0 category: bugfix bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB2BDP CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
With KASAN_VMALLOC and NEED_PER_CPU_PAGE_FIRST_CHUNK the kernel crashes:
Unable to handle kernel paging request at virtual address ffff7000028f2000 ... swapper pgtable: 64k pages, 48-bit VAs, pgdp=0000000042440000 [ffff7000028f2000] pgd=000000063e7c0003, p4d=000000063e7c0003, pud=000000063e7c0003, pmd=000000063e7b0003, pte=0000000000000000 Internal error: Oops: 96000007 [#1] PREEMPT SMP Modules linked in: CPU: 0 PID: 0 Comm: swapper Not tainted 5.13.0-rc4-00003-gc6e6e28f3f30-dirty #62 Hardware name: linux,dummy-virt (DT) pstate: 200000c5 (nzCv daIF -PAN -UAO -TCO BTYPE=--) pc : kasan_check_range+0x90/0x1a0 lr : memcpy+0x88/0xf4 sp : ffff80001378fe20 ... Call trace: kasan_check_range+0x90/0x1a0 pcpu_page_first_chunk+0x3f0/0x568 setup_per_cpu_areas+0xb8/0x184 start_kernel+0x8c/0x328
The vm area used in vm_area_register_early() has no kasan shadow memory. Let's add a new kasan_populate_early_vm_area_shadow() function to populate the vm area shadow memory to fix the issue.
[wangkefeng.wang@huawei.com: fix redefinition of 'kasan_populate_early_vm_area_shadow'] Link: https://lkml.kernel.org/r/20211011123211.3936196-1-wangkefeng.wang@huawei.co...
Link: https://lkml.kernel.org/r/20210910053354.26721-4-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang wangkefeng.wang@huawei.com Acked-by: Marco Elver elver@google.com [KASAN] Acked-by: Andrey Konovalov andreyknvl@gmail.com [KASAN] Acked-by: Catalin Marinas catalin.marinas@arm.com Cc: Andrey Ryabinin ryabinin.a.a@gmail.com Cc: Dmitry Vyukov dvyukov@google.com Cc: Greg Kroah-Hartman gregkh@linuxfoundation.org Cc: Will Deacon will@kernel.org Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Conflicts: include/linux/kasan.h [Because OLK-5.10 doesn't have mm/kasan/shadow.c, move "void __init __weak kasan_populate_early_vm_area_shadow(void *start, unsigned long size)" to mm/kasan/common.c] Signed-off-by: Kaixiong Yu yukaixiong@huawei.com --- arch/arm64/mm/kasan_init.c | 16 ++++++++++++++++ include/linux/kasan.h | 10 +++++++++- mm/kasan/common.c | 5 +++++ mm/vmalloc.c | 1 + 4 files changed, 31 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 02051d4074c4..952807291615 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -208,6 +208,22 @@ static void __init clear_pgds(unsigned long start, set_pgd(pgd_offset_k(start), __pgd(0)); }
+#ifdef CONFIG_KASAN_VMALLOC +void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size) +{ + unsigned long shadow_start, shadow_end; + + if (!is_vmalloc_or_module_addr(start)) + return; + + shadow_start = (unsigned long)kasan_mem_to_shadow(start); + shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE); + shadow_end = (unsigned long)kasan_mem_to_shadow(start + size); + shadow_end = ALIGN(shadow_end, PAGE_SIZE); + kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE); +} +#endif + void __init kasan_init(void) { u64 kimg_shadow_start, kimg_shadow_end; diff --git a/include/linux/kasan.h b/include/linux/kasan.h index c0b976dd138b..894bbeaceb05 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -217,7 +217,10 @@ void kasan_unpoison_vmalloc(const void *start, unsigned long size); void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); -#else + +void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); + +#else /* CONFIG_KASAN_VMALLOC */ static inline int kasan_populate_vmalloc(unsigned long start, unsigned long size) { @@ -232,6 +235,11 @@ static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end) {} + +static inline void kasan_populate_early_vm_area_shadow(void *start, + unsigned long size) +{ } + #endif
#ifdef CONFIG_KASAN diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 592eeba0a787..1958d7d64d1d 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -997,4 +997,9 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, (unsigned long)shadow_end); } } + +void __init __weak kasan_populate_early_vm_area_shadow(void *start, + unsigned long size) +{ } + #endif diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 6de2ffbe925f..4a2c6ce0ad56 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2298,6 +2298,7 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align) vm->addr = (void *)addr; vm->next = *p; *p = vm; + kasan_populate_early_vm_area_shadow(vm->addr, vm->size); }
static void vmap_init_free_space(void)