From: Bang Li <libang.li@antgroup.com>
mainline inclusion
from mainline-v6.11-rc1
commit 23b1b44e6c61295084284aa7d87db863a7802b92
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAIHPC
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Patch series "Add update_mmu_tlb_range() to simplify code", v4.
This series of commits mainly adds update_mmu_tlb_range() to batch-update the TLB over an address range, and implements update_mmu_tlb() using update_mmu_tlb_range().
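As a rough sketch only (the generic definition below is an assumption about a later patch in this series, not code from this patch), update_mmu_tlb() is expected to reduce to the nr == 1 case of update_mmu_tlb_range():

	/*
	 * Sketch, not this patch: the generic update_mmu_tlb() expressed
	 * as a single-entry wrapper around update_mmu_tlb_range().
	 */
	static inline void update_mmu_tlb(struct vm_area_struct *vma,
					  unsigned long address, pte_t *ptep)
	{
		update_mmu_tlb_range(vma, address, ptep, 1);
	}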
After commit 19eaf44954df ("mm: thp: support allocation of anonymous multi-size THP"), we may need to batch-update the TLB for a range of addresses by calling update_mmu_tlb() in a loop. Using update_mmu_tlb_range(), we can simplify the code and possibly avoid executing some unnecessary code on some architectures.
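For illustration, a simplified, hypothetical caller (not code from this patch; variable names are made up) shows the intended effect on such a loop:

	/* Before: one TLB update per PTE of an nr_pages mapping. */
	for (i = 0; i < nr_pages; i++)
		update_mmu_tlb(vma, addr + i * PAGE_SIZE, ptep + i);

	/* After: a single ranged update covering the same nr_pages PTEs. */
	update_mmu_tlb_range(vma, addr, ptep, nr_pages);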
This patch (of 3):
Add update_mmu_tlb_range(), which allows batch-updating the TLB for an address range.
Link: https://lkml.kernel.org/r/20240522061204.117421-1-libang.li@antgroup.com
Link: https://lkml.kernel.org/r/20240522061204.117421-2-libang.li@antgroup.com
Signed-off-by: Bang Li <libang.li@antgroup.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 arch/loongarch/include/asm/pgtable.h | 2 ++
 arch/mips/include/asm/pgtable.h      | 2 ++
 arch/riscv/include/asm/pgtable.h     | 2 ++
 arch/xtensa/include/asm/pgtable.h    | 3 +++
 arch/xtensa/mm/tlb.c                 | 6 ++++++
 include/linux/pgtable.h              | 7 +++++++
 6 files changed, 22 insertions(+)
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 29d9b12298bc..e48efd4a3e3e 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -472,6 +472,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 
 #define __HAVE_ARCH_UPDATE_MMU_TLB
 #define update_mmu_tlb update_mmu_cache
+#define update_mmu_tlb_range(vma, addr, ptep, nr) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, nr)
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 			unsigned long address, pmd_t *pmdp)
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 430b208c0130..58ada9791e5a 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -596,6 +596,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 
 #define __HAVE_ARCH_UPDATE_MMU_TLB
 #define update_mmu_tlb update_mmu_cache
+#define update_mmu_tlb_range(vma, address, ptep, nr) \
+	update_mmu_cache_range(NULL, vma, address, ptep, nr)
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 			unsigned long address, pmd_t *pmdp)
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index a16fcdf91f39..93ca36c68833 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -493,6 +493,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 
 #define __HAVE_ARCH_UPDATE_MMU_TLB
 #define update_mmu_tlb update_mmu_cache
+#define update_mmu_tlb_range(vma, addr, ptep, nr) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, nr)
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 			unsigned long address, pmd_t *pmdp)
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 9a7e5e57ee9a..436158bd9030 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -413,6 +413,9 @@ typedef pte_t *pte_addr_t;
 void update_mmu_tlb(struct vm_area_struct *vma,
 		unsigned long address, pte_t *ptep);
 #define __HAVE_ARCH_UPDATE_MMU_TLB
+void update_mmu_tlb_range(struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr);
+#define update_mmu_tlb_range update_mmu_tlb_range
 
 #endif /* !defined (__ASSEMBLY__) */
 
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 4f974b74883c..b1e1f63de72b 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -169,6 +169,12 @@ void update_mmu_tlb(struct vm_area_struct *vma,
 	local_flush_tlb_page(vma, address);
 }
 
+void update_mmu_tlb_range(struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr)
+{
+	local_flush_tlb_range(vma, address, address + PAGE_SIZE * nr);
+}
+
 #ifdef CONFIG_DEBUG_TLB_SANITY
 
 static unsigned get_pte_for_vaddr(unsigned vaddr)
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 2ac8e48031cb..1494ea7629da 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -715,6 +715,13 @@ static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
  * fault. This function updates TLB only, do nothing with cache or others.
  * It is the difference with function update_mmu_cache.
  */
+#ifndef update_mmu_tlb_range
+static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
+				unsigned long address, pte_t *ptep, unsigned int nr)
+{
+}
+#endif
+
 #ifndef __HAVE_ARCH_UPDATE_MMU_TLB
 static inline void update_mmu_tlb(struct vm_area_struct *vma,
 				unsigned long address, pte_t *ptep)