From: Peter Zijlstra <peterz@infradead.org>

mainline inclusion
from mainline-5.2-rc1
commit 96bc9567cbe112e9320250f01b9c060c882e8619
category: feature
bugzilla: NA
CVE: NA
--------------------------------
Make issuing a TLB invalidate for page-table pages the normal case.
The reason is twofold:
 - too many invalidates is safer than too few,

 - most architectures use the linux page-tables natively
   and would thus require this.
Make it an opt-out, instead of an opt-in.
No change in behavior intended.
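For illustration only (this sketch is not part of the diff, and the comments
inside tlb_table_invalidate() are elided), the change boils down to flipping
the preprocessor guard:

	/* Before: flushing was opt-in; only archs selecting
	 * HAVE_RCU_TABLE_INVALIDATE (x86, arm64) flushed here. */
	static inline void tlb_table_invalidate(struct mmu_gather *tlb)
	{
	#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
		tlb_flush_mmu_tlbonly(tlb);
	#endif
	}

	/* After: flushing is opt-out; every arch flushes unless it selects
	 * HAVE_RCU_TABLE_NO_INVALIDATE (powerpc, sparc64, which do not use
	 * the Linux page-tables natively). */
	static inline void tlb_table_invalidate(struct mmu_gather *tlb)
	{
	#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
		tlb_flush_mmu_tlbonly(tlb);
	#endif
	}

The architecture Kconfig selects are inverted to match, so the set of
architectures that actually flush is identical before and after.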
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/Kconfig              | 2 +-
 arch/arm64/Kconfig        | 1 -
 arch/powerpc/Kconfig      | 1 +
 arch/sparc/Kconfig        | 1 +
 arch/x86/Kconfig          | 1 -
 include/asm-generic/tlb.h | 9 +++++----
 mm/mmu_gather.c           | 2 +-
 7 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index de33474c4381..db3d9a95e055 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -363,7 +363,7 @@ config HAVE_ARCH_JUMP_LABEL
 config HAVE_RCU_TABLE_FREE
 	bool
 
-config HAVE_RCU_TABLE_INVALIDATE
+config HAVE_RCU_TABLE_NO_INVALIDATE
 	bool
 
 config HAVE_MMU_GATHER_PAGE_SIZE
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8dd00a8cc99f..2f5642f841e5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -147,7 +147,6 @@ config ARM64
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_RCU_TABLE_FREE
-	select HAVE_RCU_TABLE_INVALIDATE
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a1db01c77d87..ac66932425d2 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -217,6 +217,7 @@ config PPC
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_RCU_TABLE_FREE if SMP
+	select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
 	select HAVE_MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index e6f2a38d2e61..d90d632868aa 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -64,6 +64,7 @@ config SPARC64
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_RCU_TABLE_FREE if SMP
+	select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_DYNAMIC_FTRACE
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 343a83caebb7..2b0695630031 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -180,7 +180,6 @@ config X86
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_RCU_TABLE_FREE if PARAVIRT
-	select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
 	select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 0743e03c8ba2..cdf0586b6b8f 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -132,11 +132,12 @@
  * When used, an architecture is expected to provide __tlb_remove_table()
  * which does the actual freeing of these pages.
  *
- * HAVE_RCU_TABLE_INVALIDATE
+ * HAVE_RCU_TABLE_NO_INVALIDATE
  *
- * This makes HAVE_RCU_TABLE_FREE call tlb_flush_mmu_tlbonly() before freeing
- * the page-table pages. Required if you use HAVE_RCU_TABLE_FREE and your
- * architecture uses the Linux page-tables natively.
+ * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
+ * freeing the page-table pages. This can be avoided if you use
+ * HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
+ * page-tables natively.
  *
  * MMU_GATHER_NO_RANGE
  *
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 07f5706d1f67..2044a71ffb48 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -102,7 +102,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
  */
 static inline void tlb_table_invalidate(struct mmu_gather *tlb)
 {
-#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
+#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
 	/*
 	 * Invalidate page-table caches used by hardware walkers. Then we still
 	 * need to RCU-sched wait while freeing the pages because software
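For reviewers wanting context: the two call sites of tlb_table_invalidate()
in mm/mmu_gather.c look roughly as below. This is a sketch paraphrased from
the kernel of this era, not part of this diff; details such as the exact RCU
flavour (call_rcu_sched() vs. call_rcu()) differ across versions. Both the
batched RCU free and the GFP_NOWAIT allocation-failure fallback invalidate
before the table pages are freed, which is why the opt-out default is safe.

	static void tlb_table_flush(struct mmu_gather *tlb)
	{
		struct mmu_table_batch **batch = &tlb->batch;

		if (*batch) {
			/* Flush walker caches before the RCU-deferred free. */
			tlb_table_invalidate(tlb);
			call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
			*batch = NULL;
		}
	}

	void tlb_remove_table(struct mmu_gather *tlb, void *table)
	{
		struct mmu_table_batch **batch = &tlb->batch;

		if (*batch == NULL) {
			*batch = (struct mmu_table_batch *)
				__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
			if (*batch == NULL) {
				/* No batch page: invalidate, then free via
				 * the IPI-based one-off path. */
				tlb_table_invalidate(tlb);
				tlb_remove_table_one(table);
				return;
			}
			(*batch)->nr = 0;
		}
		(*batch)->tables[(*batch)->nr++] = table;
		if ((*batch)->nr == MAX_TABLE_BATCH)
			tlb_table_flush(tlb);
	}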