From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Set the PBHA0 bit in the pte entry for VMAs with VM_PBHA_BIT0 set when
the pte is built during page fault (#PF) handling: in do_anonymous_page(),
wp_page_copy() and alloc_set_pte(), and in set_huge_pte_at() for hugetlb
mappings, where the VMA has to be looked up via find_vma() because the
function is not passed one.
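PBHA (Page Based Hardware Attributes, Armv8.2 FEAT_HPDS2) lets the
kernel pass hint bits from a page table entry to the memory system.
As a rough illustration of what the new maybe_mk_pbha_bit0() helper
amounts to for an opted-in VMA (sketch only; it assumes PBHA occupies
pte bits [62:59] with PBHA0 at bit 59, and that __pbha_check_perf_only()
passes PBHA_VAL_BIT0 through on supported systems; see the
include/linux/pbha.h hunk for the real helper):

	/* sketch, not the patched code itself */
	if (system_support_pbha_bit0() && (vma->vm_flags & VM_PBHA_BIT0))
		pte_val(pte) |= BIT(59);	/* PBHA[0]: performance hint only */

Ptes for VMAs without VM_PBHA_BIT0, or on systems without PBHA bit0
support, are returned unmodified.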
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/include/asm/pgtable.h |  5 +++++
 arch/arm64/mm/hugetlbpage.c      |  2 ++
 include/linux/pbha.h             | 16 ++++++++++++++++
 mm/memory.c                      |  4 ++++
 4 files changed, 27 insertions(+)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 396627467238..1ca5e427c603 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -527,6 +527,11 @@ static inline unsigned long __pbha_check_perf_only(unsigned long pbha_val)
 #define pgprot_pbha(prot, pbha_val) \
 	__pgprot_modify(prot, PTE_PBHA_MASK, __pbha_check_perf_only(pbha_val))
 
+static inline pte_t pte_mkpbha(pte_t pte, unsigned long pbha_val)
+{
+	return set_pte_bit(pte, __pgprot(__pbha_check_perf_only(pbha_val)));
+}
+
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 99cd6e718408..4effa2dd0518 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -224,6 +224,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 	 */
 	WARN_ON(!pte_present(pte));
 
+	pte = maybe_mk_pbha_bit0(pte, find_vma(mm, addr));
+
 	if (!pte_cont(pte)) {
 		set_pte_at(mm, addr, ptep, pte);
 		return;
diff --git a/include/linux/pbha.h b/include/linux/pbha.h
index 8c5c79bfc42a..b2b256696af3 100644
--- a/include/linux/pbha.h
+++ b/include/linux/pbha.h
@@ -5,6 +5,7 @@
 #ifndef __LINUX_PBHA_H
 #define __LINUX_PBHA_H
 
+#include <linux/mm.h>
 #include <linux/libfdt.h>
 #include <linux/pgtable.h>
 
@@ -33,9 +34,24 @@ static inline pgprot_t pgprot_pbha_bit0(pgprot_t prot)
 	return pgprot_pbha(prot, PBHA_VAL_BIT0);
 }
+
+static inline pte_t maybe_mk_pbha_bit0(pte_t pte, struct vm_area_struct *vma)
+{
+	if (!system_support_pbha_bit0())
+		return pte;
+
+	if (vma->vm_flags & VM_PBHA_BIT0)
+		pte = pte_mkpbha(pte, PBHA_VAL_BIT0);
+
+	return pte;
+}
 
 #else
 static inline bool system_support_pbha_bit0(void) { return false; }
 static inline pgprot_t pgprot_pbha_bit0(pgprot_t prot) { return prot; }
+static inline pte_t maybe_mk_pbha_bit0(pte_t pte, struct vm_area_struct *vma)
+{
+	return pte;
+}
 #endif
 
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index 5893c178251a..55d4375d4b27 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -74,6 +74,7 @@
 #include <linux/ptrace.h>
 #include <linux/vmalloc.h>
 #include <linux/userswap.h>
+#include <linux/pbha.h>
 
 #include <trace/events/kmem.h>
 
@@ -2969,6 +2970,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = pte_sw_mkyoung(entry);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		entry = maybe_mk_pbha_bit0(entry, vma);
 		/*
 		 * Clear the pte entry and flush it first, before updating the
 		 * pte with the new entry. This will avoid a race condition
@@ -3709,6 +3711,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	__SetPageUptodate(page);
 
 	entry = mk_pte(page, vma->vm_page_prot);
+	entry = maybe_mk_pbha_bit0(entry, vma);
 	entry = pte_sw_mkyoung(entry);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
@@ -4016,6 +4019,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 		page_add_file_rmap(page, false);
 	}
+	entry = maybe_mk_pbha_bit0(entry, vma);
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
 	/* no need to invalidate: a not-present page won't be cached */