From: Liu Shixin liushixin2@huawei.com
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I4XWBS
--------------------------------
This reverts commit d3d0ca13f958d359c80c8719b23cd7367eb881f2.
We found that this patch may lead to a TLB conflict abort. This may result from the changes between block<->table mappings. The problem may have something to do with the Break-Before-Make sequence rule, but the root cause is not yet clear.
Signed-off-by: Liu Shixin liushixin2@huawei.com Reviewed-by: Kefeng Wang wangkefeng.wang@huawei.com Signed-off-by: Zheng Zengkai zhengzengkai@huawei.com --- arch/arm64/include/asm/kfence.h | 70 +-------------------------------- arch/arm64/mm/mmu.c | 6 ++- 2 files changed, 5 insertions(+), 71 deletions(-)
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index 322e95bc228d..d061176d57ea 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -8,77 +8,9 @@ #ifndef __ASM_KFENCE_H #define __ASM_KFENCE_H
-#include <linux/kfence.h> #include <asm/cacheflush.h> -#include <asm/pgalloc.h>
-static inline int split_pud_page(pud_t *pud, unsigned long addr) -{ - int i; - pmd_t *pmd = pmd_alloc_one(&init_mm, addr); - unsigned long pfn = PFN_DOWN(__pa(addr)); - - if (!pmd) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_PMD; i++) - set_pmd(pmd + i, pmd_mkhuge(pfn_pmd(pfn + i * PTRS_PER_PTE, PAGE_KERNEL))); - - smp_wmb(); /* See comment in __pte_alloc */ - pud_populate(&init_mm, pud, pmd); - - flush_tlb_kernel_range(addr, addr + PUD_SIZE); - return 0; -} - -static inline int split_pmd_page(pmd_t *pmd, unsigned long addr) -{ - int i; - pte_t *pte = pte_alloc_one_kernel(&init_mm); - unsigned long pfn = PFN_DOWN(__pa(addr)); - - if (!pte) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_PTE; i++) - set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL)); - - smp_wmb(); /* See comment in __pte_alloc */ - pmd_populate_kernel(&init_mm, pmd, pte); - - flush_tlb_kernel_range(addr, addr + PMD_SIZE); - return 0; -} - -static inline bool arch_kfence_init_pool(void) -{ - unsigned long addr; - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - - for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); - addr += PAGE_SIZE) { - pgd = pgd_offset(&init_mm, addr); - if (pgd_leaf(*pgd)) - return false; - p4d = p4d_offset(pgd, addr); - if (p4d_leaf(*p4d)) - return false; - pud = pud_offset(p4d, addr); - if (pud_leaf(*pud)) { - if (split_pud_page(pud, addr & PUD_MASK)) - return false; - } - pmd = pmd_offset(pud, addr); - if (pmd_leaf(*pmd)) { - if (split_pmd_page(pmd, addr & PMD_MASK)) - return false; - } - } - return true; -} +static inline bool arch_kfence_init_pool(void) { return true; }
static inline bool kfence_protect_page(unsigned long addr, bool protect) { diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 3b2937ae5e98..7314a7a3613f 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -477,7 +477,8 @@ static void __init map_mem(pgd_t *pgdp) int flags = 0, eflags = 0; u64 i;
- if (rodata_full || debug_pagealloc_enabled()) + if (rodata_full || debug_pagealloc_enabled() || + IS_ENABLED(CONFIG_KFENCE)) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/* @@ -1486,7 +1487,8 @@ int arch_add_memory(int nid, u64 start, u64 size, * KFENCE requires linear map to be mapped at page granularity, so that * it is possible to protect/unprotect single pages in the KFENCE pool. */ - if (rodata_full || debug_pagealloc_enabled()) + if (rodata_full || debug_pagealloc_enabled() || + IS_ENABLED(CONFIG_KFENCE)) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),