From: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
maillist inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4XWBS
Reference: https://lore.kernel.org/all/20210524172433.015b3b6b@xhacker.debian/
--------------------------------
Jisheng Zhang proposed another way of saving memory. We combine his two patches into one and make some adaptations for dynamic kfence objects.
Description of the original patch:
Some architectures may want to allocate the __kfence_pool differently, for example, earlier, before paging_init(). Since the pool may then be allocated before the linear map is set up, it cannot be zeroed at allocation time, so we also delay the memset() to kfence_init_pool().
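For context, the resulting boot ordering looks roughly like this (a simplified sketch of arm64's setup_arch(), with unrelated calls elided; the actual one-line change is in the setup.c hunk below):

    void __init setup_arch(char **cmdline_p)
    {
    	/* ... */
    	arm64_memblock_init();		/* memblock allocator is usable from here */

    	kfence_early_alloc_pool();	/* reserve __kfence_pool while it is cheap */

    	/* ... */
    	paging_init();			/* map_mem() can now special-case the pool */
    	/* ... */
    }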
KFENCE requires the linear map to be mapped at page granularity, so that it is possible to protect/unprotect single pages in the KFENCE pool. Currently, if KFENCE is enabled, arm64 maps all pages at page granularity, which seems overkill. In fact, we only need to map the pages in the KFENCE pool itself at page granularity. We achieve this goal by allocating the KFENCE pool before paging_init() so that we know the KFENCE pool address, then take care to map the pool at page granularity during map_mem().
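To see why page-granular mapping of the whole linear map is overkill, compare the pool size against total RAM. Upstream sizes the pool with a compile-time constant (the dynamic-objects adaptation in this series computes the object count at boot instead, so treat the numbers as illustrative):

    /* include/linux/kfence.h (upstream definition) */
    #define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)

    /*
     * With the default CONFIG_KFENCE_NUM_OBJECTS=255 and 4 KiB pages:
     *   (255 + 1) * 2 * 4 KiB = 2 MiB
     * Only this 2 MiB region needs page granularity; forcing
     * NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS on all of RAM costs extra
     * page-table memory and TLB pressure for no benefit.
     */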
Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/arm64/include/asm/kfence.h |  8 +++++++-
 arch/arm64/kernel/setup.c       |  3 +++
 arch/arm64/mm/mmu.c             | 34 +++++++++++++++++++++++++--------
 include/linux/kfence.h          |  2 ++
 mm/kfence/core.c                | 22 +++++++++++++++++++--
 5 files changed, 58 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index d061176d57ea..64d7cbfe067b 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -8,9 +8,15 @@
 #ifndef __ASM_KFENCE_H
 #define __ASM_KFENCE_H
 
+#include <linux/kfence.h>
 #include <asm/cacheflush.h>
 
-static inline bool arch_kfence_init_pool(void) { return true; }
+static inline bool arch_kfence_init_pool(void)
+{
+	memset(__kfence_pool, 0, KFENCE_POOL_SIZE);
+
+	return true;
+}
 
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 3c834d7c299a..08198b824846 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -18,6 +18,7 @@
 #include <linux/screen_info.h>
 #include <linux/init.h>
 #include <linux/kexec.h>
+#include <linux/kfence.h>
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
@@ -387,6 +388,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 
 	arm64_memblock_init();
 
+	kfence_early_alloc_pool();
+
 	efi_fake_memmap();
 	efi_find_mirror();
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c193309ef8e5..c01dc4bb6faa 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/kexec.h>
+#include <linux/kfence.h>
 #include <linux/libfdt.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
@@ -492,10 +493,19 @@ static void __init map_mem(pgd_t *pgdp)
 	int flags = 0;
 	u64 i;
 
-	if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
+#ifdef CONFIG_KFENCE
+	/*
+	 * KFENCE requires linear map to be mapped at page granularity, so
+	 * temporarily skip mapping for __kfence_pool in the following
+	 * for-loop
+	 */
+	if (__kfence_pool)
+		memblock_mark_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
+#endif
+
 	/*
 	 * Take care not to create a writable alias for the
 	 * read-only text and rodata sections of the kernel image.
@@ -530,6 +540,19 @@ static void __init map_mem(pgd_t *pgdp)
 	__map_memblock(pgdp, kernel_start, kernel_end,
 		       PAGE_KERNEL, NO_CONT_MAPPINGS);
 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
+
+#ifdef CONFIG_KFENCE
+	/*
+	 * Map the __kfence_pool at page granularity now.
+	 */
+	if (__kfence_pool) {
+		__map_memblock(pgdp, __pa(__kfence_pool),
+			       __pa(__kfence_pool + KFENCE_POOL_SIZE),
+			       pgprot_tagged(PAGE_KERNEL),
+			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+		memblock_clear_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
+	}
+#endif
 }
 
 void mark_rodata_ro(void)
@@ -1454,12 +1477,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	}
 
-	/*
-	 * KFENCE requires linear map to be mapped at page granularity, so that
-	 * it is possible to protect/unprotect single pages in the KFENCE pool.
-	 */
-	if (rodata_full || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (rodata_full || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index f77b0e4de937..6486c3dcac36 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -65,6 +65,7 @@ static __always_inline bool is_kfence_address(const void *addr)
 	return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
 }
 
+void __init kfence_early_alloc_pool(void);
+
 /**
  * kfence_alloc_pool() - allocate the KFENCE pool via memblock
  */
@@ -214,6 +215,7 @@ bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, st
 
 #else /* CONFIG_KFENCE */
 
 static inline bool is_kfence_address(const void *addr) { return false; }
+static inline void kfence_early_alloc_pool(void) { }
 static inline void kfence_alloc_pool(void) { }
 static inline void kfence_init(void) { }
 static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 98f5317368be..2454b1c57fb1 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -860,16 +860,34 @@ static void __init kfence_dynamic_destroy(void) { }
 #endif
 
 /* === Public interface ===================================================== */
 
+void __init kfence_early_alloc_pool(void)
+{
+	if (!kfence_sample_interval)
+		return;
+
+	__kfence_pool = memblock_alloc_raw(KFENCE_POOL_SIZE, PAGE_SIZE);
+
+	if (!__kfence_pool) {
+		kfence_sample_interval = 0;
+		pr_err("failed to early allocate pool, disable KFENCE\n");
+	}
+}
+
 void __init kfence_alloc_pool(void)
 {
 	if (!kfence_sample_interval)
 		return;
 
-	if (kfence_dynamic_init())
+	if (kfence_dynamic_init()) {
+		if (__kfence_pool) {
+			memblock_free(__pa(__kfence_pool), KFENCE_POOL_SIZE);
+			__kfence_pool = NULL;
+		}
 		return;
+	}
 
-	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+	if (!__kfence_pool)
+		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+
 	if (!__kfence_pool) {
 		pr_err("failed to allocate pool\n");
 		kfence_dynamic_destroy();
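
Taken together, the pool lifecycle with this patch applied is roughly the following (a sketch assembled from the hunks above, not additional code):

    /*
     * setup_arch()
     *   arm64_memblock_init()
     *   kfence_early_alloc_pool()   -> memblock_alloc_raw(); no zeroing yet,
     *                                  the linear map does not exist
     *   paging_init() / map_mem()   -> pool mapped at page granularity only
     * ...
     * kfence_alloc_pool()           -> keeps the early pool; frees it again
     *                                  if kfence_dynamic_init() fails
     * kfence_init() -> kfence_init_pool() -> arch_kfence_init_pool()
     *                                  -> memset(__kfence_pool, 0, ...)
     */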