From: Liu Shixin <liushixin2@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 181005
https://gitee.com/openeuler/kernel/issues/I4DDEL
-------------------------------------------------
Currently, if KFENCE is enabled on arm64, the entire linear map is mapped at
page granularity, which seems overkill: only the KFENCE pool actually needs
to be mapped at page granularity. Remove the restriction from the linear map
setup and instead force the linear mapping of the KFENCE pool to page
granularity later, in arch_kfence_init_pool().
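KFENCE only needs page granularity because it protects and unprotects single
pages of its pool at runtime; a page can only be made invalid on its own if it
is mapped by its own PTE rather than covered by a block mapping. For reference,
the arm64 protection helper (left untouched by this patch) is roughly the
following sketch; its exact body is not part of this diff:

	static inline bool kfence_protect_page(unsigned long addr, bool protect)
	{
		/* Flip the valid bit of exactly one linear-map page. */
		set_memory_valid(addr, 1, !protect);

		return true;
	}

Since only pool pages are ever passed to this helper, it is sufficient to
guarantee page granularity for the pool itself in arch_kfence_init_pool().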
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
---
 arch/arm64/include/asm/kfence.h | 70 ++++++++++++++++++++++++++++++++-
 arch/arm64/mm/mmu.c             |  6 +--
 2 files changed, 71 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index d061176d57ea..322e95bc228d 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -8,9 +8,77 @@
 #ifndef __ASM_KFENCE_H
 #define __ASM_KFENCE_H
 
+#include <linux/kfence.h>
 #include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
 
-static inline bool arch_kfence_init_pool(void) { return true; }
+static inline int split_pud_page(pud_t *pud, unsigned long addr)
+{
+	int i;
+	pmd_t *pmd = pmd_alloc_one(&init_mm, addr);
+	unsigned long pfn = PFN_DOWN(__pa(addr));
+
+	if (!pmd)
+		return -ENOMEM;
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		set_pmd(pmd + i, pmd_mkhuge(pfn_pmd(pfn + i * PTRS_PER_PTE, PAGE_KERNEL)));
+
+	smp_wmb(); /* See comment in __pte_alloc */
+	pud_populate(&init_mm, pud, pmd);
+
+	flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+	return 0;
+}
+
+static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
+{
+	int i;
+	pte_t *pte = pte_alloc_one_kernel(&init_mm);
+	unsigned long pfn = PFN_DOWN(__pa(addr));
+
+	if (!pte)
+		return -ENOMEM;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
+
+	smp_wmb(); /* See comment in __pte_alloc */
+	pmd_populate_kernel(&init_mm, pmd, pte);
+
+	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+	return 0;
+}
+
+static inline bool arch_kfence_init_pool(void)
+{
+	unsigned long addr;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+	     addr += PAGE_SIZE) {
+		pgd = pgd_offset(&init_mm, addr);
+		if (pgd_leaf(*pgd))
+			return false;
+		p4d = p4d_offset(pgd, addr);
+		if (p4d_leaf(*p4d))
+			return false;
+		pud = pud_offset(p4d, addr);
+		if (pud_leaf(*pud)) {
+			if (split_pud_page(pud, addr & PUD_MASK))
+				return false;
+		}
+		pmd = pmd_offset(pud, addr);
+		if (pmd_leaf(*pmd)) {
+			if (split_pmd_page(pmd, addr & PMD_MASK))
+				return false;
+		}
+	}
+	return true;
+}
 
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b6a9895d6655..1c2a965e65b3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -492,8 +492,7 @@ static void __init map_mem(pgd_t *pgdp)
 	int flags = 0;
 	u64 i;
 
-	if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -1458,8 +1457,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	 * KFENCE requires linear map to be mapped at page granularity, so that
 	 * it is possible to protect/unprotect single pages in the KFENCE pool.
 	 */
-	if (rodata_full || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (rodata_full || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),