From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Update kernel pte entries if pbha bit0 is enabled: set PBHA bit 0 (pte bit 59) in the page table entries created via __create_pgd_mapping(), vmemmap_populate(), __set_fixmap(), vmap_range() and vmap_pages_range_noflush(). This can be used as a hint on kernel page table entries for the MMU.
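For illustration (not part of this patch): any protection value routed
through the new helper picks up PBHA bit 0, assuming pgprot_pbha() --
which this patch relies on but does not define -- installs the value
into the PBHA field (pte bits [62:59]):

	pgprot_t prot = __pgprot(PROT_NORMAL);

	prot = pbha_bit0_update_pgprot(prot);
	/*
	 * When system_support_pbha_bit0() returns true,
	 * pgprot_val(prot) & PTE_PBHA0 is now non-zero.
	 */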
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/include/asm/pgtable-hwdef.h |  1 +
 arch/arm64/mm/mmu.c                    | 14 +++++++++++++-
 include/linux/pbha.h                   | 12 ++++++++++++
 mm/vmalloc.c                           |  5 +++++
 4 files changed, 31 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 3e332d47e889..a5cff5b376f6 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -147,6 +147,7 @@
 #define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
 #define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
 #define PTE_PBHA_MASK		(_AT(pteval_t, 0xf) << 59)	/* Page Base Hardware Attributes */
+#define PTE_PBHA0		(_AT(pteval_t, 1) << 59)	/* PBHA bit 59 */
 
 #define PTE_ADDR_LOW		(((_AT(pteval_t, 1) << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
 #ifdef CONFIG_ARM64_PA_BITS_52
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 78b9e489d8f6..462d5e6b4ddf 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/pbha.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -126,6 +127,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	 */
 	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
 
+#ifdef CONFIG_ARM64_PBHA
+	mask |= PTE_PBHA0;
+#endif
+
 	/* creating or taking down mappings is always safe */
 	if (old == 0 || new == 0)
 		return true;
@@ -372,6 +377,8 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
 		return;
 
+	prot = pbha_bit0_update_pgprot(prot);
+
 	phys &= PAGE_MASK;
 	addr = virt & PAGE_MASK;
 	end = PAGE_ALIGN(virt + size);
@@ -1152,6 +1159,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 {
 	unsigned long addr = start;
 	unsigned long next;
+	pgprot_t prot;
 	pgd_t *pgdp;
 	p4d_t *p4dp;
 	pud_t *pudp;
@@ -1180,7 +1188,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		if (!p)
 			return -ENOMEM;
 
-		pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
+		prot = __pgprot(PROT_SECT_NORMAL);
+		prot = pbha_bit0_update_pgprot(prot);
+
+		pmd_set_huge(pmdp, __pa(p), prot);
 	} else
 		vmemmap_verify((pte_t *)pmdp, node, addr, next);
 } while (addr = next, addr != end);
@@ -1300,6 +1311,7 @@ void __set_fixmap(enum fixed_addresses idx,
 	ptep = fixmap_pte(addr);
 
 	if (pgprot_val(flags)) {
+		flags = pbha_bit0_update_pgprot(flags);
 		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
 	} else {
 		pte_clear(&init_mm, addr, ptep);
diff --git a/include/linux/pbha.h b/include/linux/pbha.h
index 45261d163b1b..9fd9c8114351 100644
--- a/include/linux/pbha.h
+++ b/include/linux/pbha.h
@@ -7,6 +7,9 @@
 #include <linux/efi.h>
 #include <linux/libfdt.h>
+#include <linux/pgtable.h>
+
+#define PBHA_VAL_BIT0		1UL
 
 #define EFI_OEMCONFIG_VARIABLE_GUID \
 	EFI_GUID(0x21f3b3c5, 0x946d, 0x41c1, 0x83, 0x8c, 0x19, 0x4e, 0x48, \
@@ -23,8 +26,17 @@ static inline bool system_support_pbha_bit0(void)
 {
 	return pbha_bit0_enabled;
 }
+
+static inline pgprot_t pbha_bit0_update_pgprot(pgprot_t prot)
+{
+	if (!system_support_pbha_bit0())
+		return prot;
+
+	return pgprot_pbha(prot, PBHA_VAL_BIT0);
+}
 #else
 static inline bool system_support_pbha_bit0(void) { return false; }
+static inline pgprot_t pbha_bit0_update_pgprot(pgprot_t prot) { return prot; }
 #endif
 
 #endif
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e27cd716ca95..ee23b53decad 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -38,6 +38,7 @@
 #include <linux/uaccess.h>
 #include <linux/hugetlb.h>
 #include <linux/share_pool.h>
+#include <linux/pbha.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
@@ -307,6 +308,8 @@ int vmap_range(unsigned long addr, unsigned long end,
 {
 	int err;
 
+	prot = pbha_bit0_update_pgprot(prot);
+
 	err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
 	flush_cache_vmap(addr, end);
@@ -549,6 +552,8 @@ static int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 
 	WARN_ON(page_shift < PAGE_SHIFT);
 
+	prot = pbha_bit0_update_pgprot(prot);
+
 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
 			page_shift == PAGE_SHIFT)
 		return vmap_small_pages_range_noflush(addr, end, prot, pages);
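
As an aside, a minimal sketch of how the effect could be checked on a
live kernel (illustration only, not part of this patch; the helper name
kaddr_has_pbha0() is made up for this example, and it assumes the address
is mapped at PTE granularity, so section mappings would need pmd/pud-level
checks instead):

	static bool kaddr_has_pbha0(unsigned long addr)
	{
		pgd_t *pgdp = pgd_offset_k(addr);
		p4d_t *p4dp = p4d_offset(pgdp, addr);
		pud_t *pudp = pud_offset(p4dp, addr);
		pmd_t *pmdp = pmd_offset(pudp, addr);
		pte_t *ptep = pte_offset_kernel(pmdp, addr);

		/* PTE_PBHA0 is pte bit 59, added by this patch */
		return pte_val(READ_ONCE(*ptep)) & PTE_PBHA0;
	}

With pbha bit0 enabled, kaddr_has_pbha0() should return true for an
address returned by vmalloc() once this patch is applied.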