From: Cheng Jian <cj.chengjian@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4BLL0
CVE: NA
---------------------------
The patchset cannot be merged into the mainline community, but the modules it modifies are very important, so we must always be careful here.
This patch restores all of the original code, so that when CONFIG_ARM64_TLBI_IPI is disabled there is no change to the original logic.
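
For reference, the pattern being restored can be illustrated with a minimal, stand-alone sketch (hypothetical names, not the kernel code itself): when the config option is unset, only the inline fallback is compiled and the out-of-line object is never built, so the generated code matches the pre-patchset behaviour.

    /*
     * Minimal sketch of the Kconfig-gated pattern (hypothetical example,
     * NOT the kernel code): with the option off, the inline fallback is
     * used; with it on, the declaration is satisfied by a separately
     * built object file.
     */
    #include <stdio.h>

    #ifdef CONFIG_ARM64_TLBI_IPI
    void flush_example(void);   /* provided by the extra object, built only with the option */
    #else
    static inline void flush_example(void)
    {
            printf("inline fallback: original behaviour\n");
    }
    #endif

    int main(void)
    {
            flush_example();
            return 0;
    }

With the option enabled, the extra object is pulled in by the build system, which mirrors the arm64-obj-$(CONFIG_ARM64_TLBI_IPI) += tlbflush.o line in the Makefile hunk below.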
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/tlbflush.h | 86 ++++++++++++++++++++++++++-----
 arch/arm64/kernel/Makefile        |  4 +-
 arch/arm64/kernel/tlbflush.c      | 24 ---------
 3 files changed, 75 insertions(+), 39 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 3f1d863ad3d90..2051277ebc33d 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -139,14 +139,6 @@
  *      on top of these routines, since that is our interface to the mmu_gather
  *      API as used by munmap() and friends.
  */
-
-void flush_tlb_mm(struct mm_struct *mm);
-void flush_tlb_page_nosync(struct vm_area_struct *vma,
-                           unsigned long uaddr);
-void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-                       unsigned long end, unsigned long stride, bool last_level);
-bool test_tlbi_ipi_switch(void);
-
 static inline void local_flush_tlb_all(void)
 {
         dsb(nshst);
@@ -163,6 +155,21 @@ static inline void flush_tlb_all(void)
         isb();
 }
 
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLBI_OPS    PTRS_PER_PTE
+
+#ifdef CONFIG_ARM64_TLBI_IPI
+
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page_nosync(struct vm_area_struct *vma,
+                           unsigned long uaddr);
+void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                       unsigned long end, unsigned long stride, bool last_level);
+bool test_tlbi_ipi_switch(void);
+
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
 {
         unsigned long asid = __TLBI_VADDR(0, ASID(mm));
@@ -173,6 +180,63 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
         dsb(nsh);
 }
 
+#else /* CONFIG_ARM64_TLBI_IPI */
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+        unsigned long asid = __TLBI_VADDR(0, ASID(mm));
+
+        dsb(ishst);
+        __tlbi(aside1is, asid);
+        __tlbi_user(aside1is, asid);
+        dsb(ish);
+}
+
+static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
+                                         unsigned long uaddr)
+{
+        unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
+
+        dsb(ishst);
+        __tlbi(vale1is, addr);
+        __tlbi_user(vale1is, addr);
+}
+
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+                                     unsigned long start, unsigned long end,
+                                     unsigned long stride, bool last_level)
+{
+        unsigned long asid = ASID(vma->vm_mm);
+        unsigned long addr;
+
+        start = round_down(start, stride);
+        end = round_up(end, stride);
+
+        if ((end - start) >= (MAX_TLBI_OPS * stride)) {
+                flush_tlb_mm(vma->vm_mm);
+                return;
+        }
+
+        /* Convert the stride into units of 4k */
+        stride >>= 12;
+
+        start = __TLBI_VADDR(start, asid);
+        end = __TLBI_VADDR(end, asid);
+
+        dsb(ishst);
+        for (addr = start; addr < end; addr += stride) {
+                if (last_level) {
+                        __tlbi(vale1is, addr);
+                        __tlbi_user(vale1is, addr);
+                } else {
+                        __tlbi(vae1is, addr);
+                        __tlbi_user(vae1is, addr);
+                }
+        }
+        dsb(ish);
+}
+#endif /* CONFIG_ARM64_TLBI_IPI */
+
 static inline void flush_tlb_page(struct vm_area_struct *vma,
                                   unsigned long uaddr)
 {
@@ -180,12 +244,6 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
         dsb(ish);
 }
 
-/*
- * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
- * necessarily a performance improvement.
- */
-#define MAX_TLBI_OPS    PTRS_PER_PTE
-
 static inline void flush_tlb_range(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end)
 {
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index fcb9d64d651cc..e34b9b5969bfb 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -19,7 +19,9 @@ arm64-obj-y             := debug-monitors.o entry.o irq.o fpsimd.o      \
                            return_address.o cpuinfo.o cpu_errata.o              \
                            cpufeature.o alternative.o cacheinfo.o               \
                            smp.o smp_spin_table.o topology.o smccc-call.o       \
-                           syscall.o tlbflush.o
+                           syscall.o
+
+arm64-obj-$(CONFIG_ARM64_TLBI_IPI) += tlbflush.o
 
 extra-$(CONFIG_EFI)                     := efi-entry.o
 
diff --git a/arch/arm64/kernel/tlbflush.c b/arch/arm64/kernel/tlbflush.c
index 733cf1c612294..9a51941b18b3b 100644
--- a/arch/arm64/kernel/tlbflush.c
+++ b/arch/arm64/kernel/tlbflush.c
@@ -5,7 +5,6 @@
 #include <linux/ctype.h>
 #include <asm/tlbflush.h>
 
-#ifdef CONFIG_ARM64_TLBI_IPI
 struct tlb_args {
         struct vm_area_struct *ta_vma;
         unsigned long ta_start;
@@ -27,13 +26,6 @@ bool test_tlbi_ipi_##flag(void) \
 { \
         return !!(disable_tlbflush_is & FLAG_TLBFLUSH_##FLAG); \
 }
-#else
-#define TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG) \
-bool test_tlbi_ipi_##flag(void) \
-{ \
-        return false; \
-}
-#endif
 
 #define TEST_TLBFLUSH_FLAG(flag, FLAG) \
         static __always_inline TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG)
@@ -99,26 +91,20 @@ static inline void __flush_tlb_mm(struct mm_struct *mm)
         dsb(ish);
 }
 
-#ifdef CONFIG_ARM64_TLBI_IPI
 static inline void ipi_flush_tlb_mm(void *arg)
 {
         struct mm_struct *mm = arg;
 
         local_flush_tlb_mm(mm);
 }
-#endif
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-#ifdef CONFIG_ARM64_TLBI_IPI
         if (unlikely(test_tlbi_ipi_mm()))
                 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm,
                                  (void *)mm, true);
         else
                 __flush_tlb_mm(mm);
-#else
-        __flush_tlb_mm(mm);
-#endif
 }
 
 static inline void __flush_tlb_page_nosync(unsigned long addr)
@@ -147,15 +133,11 @@ void flush_tlb_page_nosync(struct vm_area_struct *vma,
                            unsigned long uaddr)
 {
         unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
-#ifdef CONFIG_ARM64_TLBI_IPI
         if (unlikely(test_tlbi_ipi_page()))
                 on_each_cpu_mask(mm_cpumask(vma->vm_mm),
                                  ipi_flush_tlb_page_nosync, &addr, true);
         else
                 __flush_tlb_page_nosync(addr);
-#else
-        __flush_tlb_page_nosync(addr);
-#endif
 }
 
 static inline void ___flush_tlb_range(unsigned long start, unsigned long end,
@@ -189,7 +171,6 @@ static inline void __local_flush_tlb_range(unsigned long addr, bool last_level)
         dsb(nsh);
 }
 
-#ifdef CONFIG_ARM64_TLBI_IPI
 static inline void ipi_flush_tlb_range(void *arg)
 {
         struct tlb_args *ta = (struct tlb_args *)arg;
@@ -198,7 +179,6 @@ static inline void ipi_flush_tlb_range(void *arg)
         for (addr = ta->ta_start; addr < ta->ta_end; addr += ta->ta_stride)
                 __local_flush_tlb_range(addr, ta->ta_last_level);
 }
-#endif
 
 void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, unsigned long stride, bool last_level)
@@ -220,7 +200,6 @@ void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
         start = __TLBI_VADDR(start, asid);
         end = __TLBI_VADDR(end, asid);
 
-#ifdef CONFIG_ARM64_TLBI_IPI
         if (unlikely(test_tlbi_ipi_range())) {
                 struct tlb_args ta = {
                         .ta_start       = start,
@@ -233,7 +212,4 @@ void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                  &ta, true);
         } else
                 ___flush_tlb_range(start, end, stride, last_level);
-#else
-        ___flush_tlb_range(start, end, stride, last_level);
-#endif
 }