From: Ryan Roberts <ryan.roberts@arm.com>
mainline inclusion
from mainline-v6.9-rc1
commit 659e193027910a5d3083e34b488ab459d2ef5082
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CUEQ
CVE: NA
-------------------------------------------------
Since set_ptes() was introduced, set_pte_at() has been implemented as a generic macro around set_ptes(..., 1). So this change should continue to generate the same code. However, making this change prepares us for the transparent contpte support. It means we can reroute set_ptes() to __set_ptes(). Since set_pte_at() is a generic macro, there will be no equivalent __set_pte_at() to reroute to.
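For reference, the generic fallback in include/linux/pgtable.h is effectively
(shown as a sketch; the exact mainline wording may differ slightly):

	#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)

so every set_pte_at() call converted below already expanded to the same
set_ptes(..., 1) call it is being replaced with.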
Note that a couple of calls to set_pte_at() remain in the arch code. This is intentional, since those call sites are acting on behalf of core-mm and should continue to call into the public set_ptes() rather than the arch-private __set_ptes().
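As a rough illustration of the intended split (a minimal sketch only, not the
code the later contpte patches actually add, which also handles contpte
folding):

	/* arch-private helper: writes the entries exactly as given */
	static inline void __set_ptes(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep, pte_t pte, unsigned int nr);

	/* public API called by core-mm (and by set_pte_at() via the generic macro) */
	static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, unsigned int nr)
	{
		/* sketch: the real version adds contpte handling here */
		__set_ptes(mm, addr, ptep, pte, nr);
	}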
Link: https://lkml.kernel.org/r/20240215103205.2607016-9-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Tested-by: John Hubbard <jhubbard@nvidia.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit 659e193027910a5d3083e34b488ab459d2ef5082)
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 arch/arm64/include/asm/pgtable.h |  2 +-
 arch/arm64/kernel/mte.c          |  2 +-
 arch/arm64/kvm/guest.c           |  2 +-
 arch/arm64/mm/fault.c            |  2 +-
 arch/arm64/mm/hugetlbpage.c      | 10 +++++-----
 5 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index de034ca40bad..9a2df85eb493 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1084,7 +1084,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 #endif /* CONFIG_ARM64_MTE */
 
 /*
- * On AArch64, the cache coherency is handled via the set_pte_at() function.
+ * On AArch64, the cache coherency is handled via the set_ptes() function.
  */
 static inline void update_mmu_cache_range(struct vm_fault *vmf,
 		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 2fb5e7a7a4d5..b99b718164ed 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -67,7 +67,7 @@ int memcmp_pages(struct page *page1, struct page *page2)
 	/*
 	 * If the page content is identical but at least one of the pages is
 	 * tagged, return non-zero to avoid KSM merging. If only one of the
-	 * pages is tagged, set_pte_at() may zero or change the tags of the
+	 * pages is tagged, set_ptes() may zero or change the tags of the
 	 * other page via mte_sync_tags().
 	 */
 	if (page_mte_tagged(page1) || page_mte_tagged(page2))
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 89e33337ade9..8b53d0fa3ff0 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -1110,7 +1110,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 		} else {
 			/*
 			 * Only locking to serialise with a concurrent
-			 * set_pte_at() in the VMM but still overriding the
+			 * set_ptes() in the VMM but still overriding the
 			 * tags, hence ignoring the return value.
 			 */
 			try_page_mte_tagging(page);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 83b5db752fa1..dfeda64fc4e5 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -231,7 +231,7 @@ static void show_pte(unsigned long addr)
  *
  * It needs to cope with hardware update of the accessed/dirty state by other
  * agents in the system and can safely skip the __sync_icache_dcache() call as,
- * like set_pte_at(), the PTE is never changed from no-exec to exec here.
+ * like set_ptes(), the PTE is never changed from no-exec to exec here.
  *
  * Returns whether or not the PTE actually changed.
  */
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index de1a7102b5ae..dce6a0e59a67 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -254,12 +254,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	if (!pte_present(pte)) {
 		for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
-			set_pte_at(mm, addr, ptep, pte);
+			set_ptes(mm, addr, ptep, pte, 1);
 		return;
 	}
 
 	if (!pte_cont(pte)) {
-		set_pte_at(mm, addr, ptep, pte);
+		set_ptes(mm, addr, ptep, pte, 1);
 		return;
 	}
 
@@ -270,7 +270,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 	clear_flush(mm, addr, ptep, pgsize, ncontig);
 
 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
-		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+		set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
 }
 
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -478,7 +478,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 
 	hugeprot = pte_pgprot(pte);
 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
-		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+		set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
 
 	return 1;
 }
@@ -507,7 +507,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 	pfn = pte_pfn(pte);
 
 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
-		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
+		set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
 }
 
 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,