From: Cheng Jian <cj.chengjian@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4BLL0
CVE: NA
---------------------------
Several control bits are added to control the different flush paths:
mm	use tlb invalidation ipi for flush_tlb_mm
range	use tlb invalidation ipi for flush_tlb_range
page	use tlb invalidation ipi for flush_tlb_page
switch	don't local_flush_tlb_mm when switch_mm
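For example, booting with:

	disable_tlbflush_is=page,mm

selects the IPI-based invalidation for flush_tlb_page and flush_tlb_mm
while keeping the broadcast TLBI for the other paths, and the parser
then reports "DISABLE_TLBFLUSH_IS : [PAGE] [NA] [NA] [MM]". Note that
the "mm" token, when given, must come last, matching the documented
[page,range,switch,]mm syntax.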
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 .../admin-guide/kernel-parameters.txt |  7 +-
 arch/arm64/include/asm/mmu_context.h  |  6 +-
 arch/arm64/include/asm/tlbflush.h     |  1 +
 arch/arm64/kernel/tlbflush.c          | 77 +++++++++++++++++--
 4 files changed, 82 insertions(+), 9 deletions(-)
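[Editor's note: for anyone wanting to sanity-check the token parsing
outside the kernel, below is a minimal userspace harness mirroring the
loop in disable_tlbflush_is_setup(). It is a hypothetical sketch for
illustration only, not part of the patch; parse() and the sample inputs
are invented here.]

/* Userspace mirror of the boot-parameter parsing loop (illustrative). */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define FLAG_TLBFLUSH_RANGE	0x0001
#define FLAG_TLBFLUSH_PAGE	0x0002
#define FLAG_TLBFLUSH_SWITCH	0x0004
#define FLAG_TLBFLUSH_MM	0x0008

static unsigned int parse(const char *str)
{
	unsigned int flags = 0;

	while (isalpha((unsigned char)*str)) {
		/* "range,", "page," and "switch," carry a trailing comma. */
		if (!strncmp(str, "range,", 6)) {
			str += 6;
			flags |= FLAG_TLBFLUSH_RANGE;
			continue;
		}
		if (!strncmp(str, "page,", 5)) {
			str += 5;
			flags |= FLAG_TLBFLUSH_PAGE;
			continue;
		}
		if (!strncmp(str, "switch,", 7)) {
			str += 7;
			flags |= FLAG_TLBFLUSH_SWITCH;
			continue;
		}
		/* "mm" is matched exactly and terminates parsing. */
		if (!strcmp(str, "mm")) {
			flags |= FLAG_TLBFLUSH_MM;
			break;
		}
		fprintf(stderr, "unknown flag at \"%s\"\n", str);
		return 0;
	}
	return flags;
}

int main(void)
{
	printf("page,mm       -> 0x%x\n", parse("page,mm"));       /* 0xa */
	printf("range,switch, -> 0x%x\n", parse("range,switch,")); /* 0x5 */
	return 0;
}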
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 297586ef297ee..6b1fc6dedc034 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -844,10 +844,15 @@
 	disable=	[IPV6]
 			See Documentation/networking/ipv6.txt.
 
-	disable_tlbflush_is
+	disable_tlbflush_is= [page,range,switch,]mm
 			[ARM64] Disable using TLB instruction to flush
 			all PE within the same inner shareable domain.
+			range	use tlb invalidation ipi for flush_tlb_range
+			page	use tlb invalidation ipi for flush_tlb_page
+			switch	don't local_flush_tlb_mm when switch_mm
+			mm	use tlb invalidation ipi for flush_tlb_mm
+
 	hardened_usercopy=
 			[KNL] Under CONFIG_HARDENED_USERCOPY, whether
 			hardening is enabled for this boot. Hardened

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index e319ce86fa708..04a7700109a88 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -234,8 +234,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (prev != next) {
 		__switch_mm(next);
 #ifdef CONFIG_ARM64_TLBI_IPI
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
-		local_flush_tlb_mm(prev);
+		if (unlikely(test_tlbi_ipi_switch())) {
+			cpumask_clear_cpu(cpu, mm_cpumask(prev));
+			local_flush_tlb_mm(prev);
+		}
 #endif
 	}
 
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index c9307b602fadd..3f1d863ad3d90 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -145,6 +145,7 @@
 void flush_tlb_page_nosync(struct vm_area_struct *vma, unsigned long uaddr);
 void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		       unsigned long end, unsigned long stride, bool last_level);
+bool test_tlbi_ipi_switch(void);
 
 static inline void local_flush_tlb_all(void)
 {

diff --git a/arch/arm64/kernel/tlbflush.c b/arch/arm64/kernel/tlbflush.c
index e20fbd38fd262..733cf1c612294 100644
--- a/arch/arm64/kernel/tlbflush.c
+++ b/arch/arm64/kernel/tlbflush.c
@@ -2,6 +2,7 @@
 // Copyright (C) 2019 FUJITSU LIMITED
 
 #include <linux/smp.h>
+#include <linux/ctype.h>
 #include <asm/tlbflush.h>
 
 #ifdef CONFIG_ARM64_TLBI_IPI
@@ -13,15 +14,79 @@ struct tlb_args {
 	bool ta_last_level;
 };
 
-int disable_tlbflush_is;
+unsigned int disable_tlbflush_is;
+
+#define FLAG_TLBFLUSH_RANGE	0x0001
+#define FLAG_TLBFLUSH_PAGE	0x0002
+#define FLAG_TLBFLUSH_SWITCH	0x0004
+#define FLAG_TLBFLUSH_MM	0x0008
+
+#define TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG)			\
+bool test_tlbi_ipi_##flag(void)					\
+{								\
+	return !!(disable_tlbflush_is & FLAG_TLBFLUSH_##FLAG);	\
+}
+#else
+#define TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG)			\
+bool test_tlbi_ipi_##flag(void)					\
+{								\
+	return false;						\
+}
+#endif
+
+#define TEST_TLBFLUSH_FLAG(flag, FLAG)				\
+static __always_inline TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG)
+
+TEST_TLBFLUSH_FLAG(mm, MM)
+TEST_TLBFLUSH_FLAG(page, PAGE)
+TEST_TLBFLUSH_FLAG(range, RANGE)
+TEST_TLBFLUSH_FLAG_EXTERN(switch, SWITCH)
+
+#ifdef CONFIG_ARM64_TLBI_IPI
 static int __init disable_tlbflush_is_setup(char *str)
 {
-	disable_tlbflush_is = 1;
+	unsigned int flags = 0;
+
+	while (isalpha(*str)) {
+		if (!strncmp(str, "range,", 6)) {
+			str += 6;
+			flags |= FLAG_TLBFLUSH_RANGE;
+			continue;
+		}
+
+		if (!strncmp(str, "page,", 5)) {
+			str += 5;
+			flags |= FLAG_TLBFLUSH_PAGE;
+			continue;
+		}
+
+		if (!strncmp(str, "switch,", 7)) {
+			str += 7;
+			flags |= FLAG_TLBFLUSH_SWITCH;
+			continue;
+		}
+
+		if (!strcmp(str, "mm")) {
+			str += 2;
+			flags |= FLAG_TLBFLUSH_MM;
+			break;
+		}
+
+		pr_warn("disable_tlbflush_is: Error, unknown flag\n");
+		return 0;
+	}
+
+	disable_tlbflush_is = flags;
+	pr_info("DISABLE_TLBFLUSH_IS : [%s] [%s] [%s] [%s]\n",
+		test_tlbi_ipi_page() ? "PAGE" : "NA",
+		test_tlbi_ipi_range() ? "RANGE" : "NA",
+		test_tlbi_ipi_switch() ? "SWITCH" : "NA",
+		test_tlbi_ipi_mm() ? "MM" : "NA");
 
 	return 0;
 }
-__setup("disable_tlbflush_is", disable_tlbflush_is_setup);
+early_param("disable_tlbflush_is", disable_tlbflush_is_setup);
 #endif
 
 static inline void __flush_tlb_mm(struct mm_struct *mm)
@@ -46,7 +111,7 @@ static inline void ipi_flush_tlb_mm(void *arg)
 void flush_tlb_mm(struct mm_struct *mm)
 {
 #ifdef CONFIG_ARM64_TLBI_IPI
-	if (unlikely(disable_tlbflush_is))
+	if (unlikely(test_tlbi_ipi_mm()))
 		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm,
 				 (void *)mm, true);
 	else
@@ -83,7 +148,7 @@ void flush_tlb_page_nosync(struct vm_area_struct *vma, unsigned long uaddr)
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
 #ifdef CONFIG_ARM64_TLBI_IPI
-	if (unlikely(disable_tlbflush_is))
+	if (unlikely(test_tlbi_ipi_page()))
 		on_each_cpu_mask(mm_cpumask(vma->vm_mm),
 				 ipi_flush_tlb_page_nosync, &addr, true);
 	else
@@ -156,7 +221,7 @@ void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 #ifdef CONFIG_ARM64_TLBI_IPI
-	if (unlikely(disable_tlbflush_is)) {
+	if (unlikely(test_tlbi_ipi_range())) {
 		struct tlb_args ta = {
 			.ta_start = start,
 			.ta_end = end,