From: Marco Elver <elver@google.com>
Add architecture specific implementation details for KFENCE and enable KFENCE for the arm64 architecture. In particular, this implements the required interface in <asm/kfence.h>.
KFENCE requires that attributes for pages from its memory pool can individually be set. Therefore, force the entire linear map to be mapped at page granularity. Doing so may result in extra memory allocated for page tables in case rodata=full is not set; however, currently CONFIG_RODATA_FULL_DEFAULT_ENABLED=y is the default, and the common case is therefore not affected by this change.
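For context (not part of this change): the generic KFENCE code drives the kfence_protect_page() hook added below whenever it (un)protects a page in its pool, roughly as follows (simplified sketch of mm/kfence/core.c, which additionally wraps the calls in KFENCE_WARN_ON()):

	static bool kfence_protect(unsigned long addr)
	{
		/* Invalidate the linear-map entry; any access now faults. */
		return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
	}

	static bool kfence_unprotect(unsigned long addr)
	{
		/* Make the page valid again so the object can be accessed. */
		return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
	}

On arm64 this maps directly onto set_memory_valid(), so no further arch-specific machinery is needed.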
[elver@google.com: add missing copyright and description header]
  Link: https://lkml.kernel.org/r/20210118092159.145934-3-elver@google.com
Link: https://lkml.kernel.org/r/20201103175841.3495947-4-elver@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Co-developed-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Jann Horn <jannh@google.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joern Engel <joern@purestorage.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: SeongJae Park <sjpark@amazon.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Yingjie Shang <1415317271@qq.com>
Reviewed-by: Bixuan Cui <cuibixuan@huawei.com>
---
 arch/arm64/Kconfig              |  1 +
 arch/arm64/include/asm/kfence.h | 24 ++++++++++++++++++++++++
 arch/arm64/mm/fault.c           |  4 ++++
 arch/arm64/mm/mmu.c             |  7 ++++++-
 4 files changed, 35 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/include/asm/kfence.h
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 109d8a18f268..6331f54b493a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -138,6 +138,7 @@ config ARM64
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
+	select HAVE_ARCH_KFENCE
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
new file mode 100644
index 000000000000..42a06f83850a
--- /dev/null
+++ b/arch/arm64/include/asm/kfence.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arm64 KFENCE support.
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef __ASM_KFENCE_H
+#define __ASM_KFENCE_H
+
+#include <asm/cacheflush.h>
+
+#define KFENCE_SKIP_ARCH_FAULT_HANDLER "el1_sync"
+
+static inline bool arch_kfence_init_pool(void) { return true; }
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+	set_memory_valid(addr, 1, !protect);
+
+	return true;
+}
+
+#endif /* __ASM_KFENCE_H */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 795d224f184f..d1a8b523d28e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -10,6 +10,7 @@
 #include <linux/acpi.h>
 #include <linux/bitfield.h>
 #include <linux/extable.h>
+#include <linux/kfence.h>
 #include <linux/signal.h>
 #include <linux/mm.h>
 #include <linux/hardirq.h>
@@ -322,6 +323,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
 	} else if (addr < PAGE_SIZE) {
 		msg = "NULL pointer dereference";
 	} else {
+		if (kfence_handle_page_fault(addr))
+			return;
+
 		msg = "paging request";
 	}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 1ec57fd87ef7..2af6d99a8c2e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1452,7 +1452,12 @@ int arch_add_memory(int nid, u64 start, u64 size,
 		return -EINVAL;
 	}
 
-	if (rodata_full || debug_pagealloc_enabled())
+	/*
+	 * KFENCE requires linear map to be mapped at page granularity, so that
+	 * it is possible to protect/unprotect single pages in the KFENCE pool.
+	 */
+	if (rodata_full || debug_pagealloc_enabled() ||
+	    IS_ENABLED(CONFIG_KFENCE))
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
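For reference on the arch_add_memory() hunk above (not part of this patch): kfence_protect_page() relies on the existing arm64 helper set_memory_valid() from arch/arm64/mm/pageattr.c, which can only change attributes of memory mapped at page granularity; that is why the linear map must avoid block and contiguous mappings when KFENCE is enabled. A sketch of that existing helper:

	int set_memory_valid(unsigned long addr, int numpages, int enable)
	{
		/* Set or clear PTE_VALID on each page in [addr, addr + numpages * PAGE_SIZE). */
		if (enable)
			return __change_memory_common(addr, PAGE_SIZE * numpages,
						      __pgprot(PTE_VALID), __pgprot(0));
		else
			return __change_memory_common(addr, PAGE_SIZE * numpages,
						      __pgprot(0), __pgprot(PTE_VALID));
	}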