ARMv8.4 TTL feature.
Catalin Marinas (1):
  arm64: Shift the __tlbi_level() indentation left

Marc Zyngier (2):
  arm64: Detect the ARMv8.4 TTL feature
  arm64: Add level-hinted TLB invalidation helper

Peter Zijlstra (Intel) (1):
  tlb: mmu_gather: add tlb_flush_*_range APIs

Zhenyu Ye (4):
  arm64: Add tlbi_user_level TLB invalidation helper
  arm64: tlb: Set the TTL field in flush_tlb_range
  arm64: tlb: Set the TTL field in flush_*_tlb_range
  arm64: tlb: don't set the ttl value in flush_tlb_page_nosync
 arch/arm64/include/asm/cpucaps.h        |  3 +-
 arch/arm64/include/asm/pgtable.h        | 10 ++++
 arch/arm64/include/asm/stage2_pgtable.h |  9 ++++
 arch/arm64/include/asm/sysreg.h         |  1 +
 arch/arm64/include/asm/tlb.h            | 29 +++++++++++-
 arch/arm64/include/asm/tlbflush.h       | 63 ++++++++++++++++++++++---
 arch/arm64/kernel/cpufeature.c          | 11 +++++
 include/asm-generic/tlb.h               | 55 +++++++++++++++------
 8 files changed, 158 insertions(+), 23 deletions(-)
From: Marc Zyngier <maz@kernel.org>

mainline inclusion
from mainline-v5.9-rc1
commit 552ae76face5
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------------
In order to reduce the cost of TLB invalidation, the ARMv8.4 TTL feature allows TLB invalidation (TLBI) instructions to be issued with a level hint, allowing for quicker invalidation.
Let's detect the feature for now. Further patches will implement its actual usage.
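As an aside (not part of this patch), a minimal sketch of how later code can gate on the new capability, using the existing cpus_have_const_cap() API; the helper name is hypothetical:

	/* Hypothetical helper: true once the sanitised feature registers
	 * report ARMv8.4-TTL on all CPUs. */
	static inline bool can_use_ttl_hints(void)
	{
		return cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL);
	}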
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>

Conflicts:
	arch/arm64/include/asm/cpucaps.h
	arch/arm64/include/asm/sysreg.h
	arch/arm64/kernel/cpufeature.c

Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/cpucaps.h |  3 ++-
 arch/arm64/include/asm/sysreg.h  |  1 +
 arch/arm64/kernel/cpufeature.c   | 11 +++++++++++
 3 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index e28976203fd5..92d1f3cd8057 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -66,7 +66,8 @@
 #define ARM64_HAS_ADDRESS_AUTH			45
 #define ARM64_HAS_GENERIC_AUTH			46
 #define ARM64_HAS_CNP				47
+#define ARM64_HAS_ARMv8_4_TTL			48
 
-#define ARM64_NCAPS				48
+#define ARM64_NCAPS				49
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 7d1fce7db714..25635f958c8b 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -635,6 +635,7 @@
 #define ID_AA64MMFR1_VMIDBITS_16	2
 
 /* id_aa64mmfr2 */
+#define ID_AA64MMFR2_TTL_SHIFT		48
 #define ID_AA64MMFR2_FWB_SHIFT		40
 #define ID_AA64MMFR2_AT_SHIFT		32
 #define ID_AA64MMFR2_LVA_SHIFT		16
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f9bc3603e77e..f6ad941c8417 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -222,6 +222,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
@@ -1552,6 +1553,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		.cpu_enable = cpu_has_fwb,
 	},
+	{
+		.desc = "ARMv8.4 Translation Table Level",
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.capability = ARM64_HAS_ARMv8_4_TTL,
+		.sys_reg = SYS_ID_AA64MMFR2_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR2_TTL_SHIFT,
+		.min_field_value = 1,
+		.matches = has_cpuid_feature,
+	},
 #ifdef CONFIG_ARM64_HW_AFDBM
 	{
 		/*
From: Marc Zyngier <maz@kernel.org>

mainline inclusion
from mainline-v5.9-rc1
commit c10bc62ae4d2
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------------
Add a level-hinted TLB invalidation helper that only gets used if ARMv8.4-TTL gets detected.
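As an illustration only (not part of this patch), a stage-2 caller that knows the leaf entry's level can pass the matching hint, while a caller without that information passes 0 and keeps the old, non-hinted behaviour; the function below is hypothetical:

	/* Hypothetical stage-2 invalidation path; assumes <asm/tlbflush.h>
	 * and <asm/stage2_pgtable.h> are available. */
	static void example_s2_invalidate(phys_addr_t ipa, bool leaf_is_pmd)
	{
		/* IPA passed shifted by 12, as existing __tlbi(ipas2e1is, ...)
		 * users do. */
		u64 arg = ipa >> 12;

		__tlbi_level(ipas2e1is, arg,
			     leaf_is_pmd ? S2_PMD_LEVEL : S2_NO_LEVEL_HINT);
		dsb(ish);
	}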
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/stage2_pgtable.h |  9 +++++
 arch/arm64/include/asm/tlbflush.h       | 45 +++++++++++++++++++++++++
 2 files changed, 54 insertions(+)
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
index 5412fa40825e..5896b071023d 100644
--- a/arch/arm64/include/asm/stage2_pgtable.h
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -241,4 +241,13 @@ stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 	return (boundary - 1 < end - 1) ? boundary : end;
 }
 
+/*
+ * Level values for the ARMv8.4-TTL extension, mapping PUD/PMD/PTE and
+ * the architectural page-table level.
+ */
+#define S2_NO_LEVEL_HINT	0
+#define S2_PUD_LEVEL		1
+#define S2_PMD_LEVEL		2
+#define S2_PTE_LEVEL		3
+
 #endif /* __ARM64_S2_PGTABLE_H_ */
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 178719950e74..d5532963d9f8 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -21,6 +21,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bitfield.h>
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 #include <asm/cputype.h>
@@ -70,6 +71,50 @@
 		__ta;						\
 	})
 
+/*
+ * Level-based TLBI operations.
+ *
+ * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
+ * the level at which the invalidation must take place. If the level is
+ * wrong, no invalidation may take place. In the case where the level
+ * cannot be easily determined, a 0 value for the level parameter will
+ * perform a non-hinted invalidation.
+ *
+ * For Stage-2 invalidation, use the level values provided to that effect
+ * in asm/stage2_pgtable.h.
+ */
+#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)
+#define TLBI_TTL_TG_4K		1
+#define TLBI_TTL_TG_16K		2
+#define TLBI_TTL_TG_64K		3
+
+#define __tlbi_level(op, addr, level)					\
+	do {								\
+		u64 arg = addr;						\
+									\
+		if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&	\
+		    level) {						\
+			u64 ttl = level & 3;				\
+									\
+			switch (PAGE_SIZE) {				\
+			case SZ_4K:					\
+				ttl |= TLBI_TTL_TG_4K << 2;		\
+				break;					\
+			case SZ_16K:					\
+				ttl |= TLBI_TTL_TG_16K << 2;		\
+				break;					\
+			case SZ_64K:					\
+				ttl |= TLBI_TTL_TG_64K << 2;		\
+				break;					\
+			}						\
+									\
+			arg &= ~TLBI_TTL_MASK;				\
+			arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);		\
+		}							\
+									\
+		__tlbi(op, arg);					\
+	} while(0)
+
 /*
  * TLB Invalidation
  * ================
From: Zhenyu Ye <yezhenyu2@huawei.com>

mainline inclusion
from mainline-v5.9-rc1
commit e735b98a5fe0
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------------
Add a level-hinted parameter to __tlbi_user, which only gets used if ARMv8.4-TTL gets detected.
ARMv8.4-TTL provides the TTL field in the TLBI instruction to indicate the level of the translation table walk holding the leaf entry for the address being invalidated.

This patch sets the default level value of flush_tlb_range() to 0, which will be updated in future patches. It sets the ttl value of flush_tlb_page_nosync() to 3 because it is only called to flush a single PTE page.
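For a concrete feel of what the level hint encodes (a worked example derived from the __tlbi_level() definition added by the previous patch):

	/*
	 * Worked example: 4K granule kernel, last-level (PTE) invalidation,
	 * i.e. __tlbi_level(vale1is, addr, 3):
	 *
	 *	ttl = 3 | (TLBI_TTL_TG_4K << 2) = 0b0111
	 *	arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);	/* TTL is bits [47:44] */
	 *
	 * so bits [47:46] of the TLBI argument carry the translation granule
	 * and bits [45:44] carry the level.
	 */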
Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Link: https://lore.kernel.org/r/20200625080314.230-4-yezhenyu2@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/tlbflush.h | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index d5532963d9f8..25d8d33bba5e 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -115,6 +115,11 @@
 		__tlbi(op, arg);					\
 	} while(0)
 
+#define __tlbi_user_level(op, arg, level) do {				\
+	if (arm64_kernel_unmapped_at_el0())				\
+		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
+} while (0)
+
 /*
  * TLB Invalidation
  * ================
@@ -216,8 +221,9 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
 	dsb(ishst);
-	__tlbi(vale1is, addr);
-	__tlbi_user(vale1is, addr);
+	/* This function is only called on a small page */
+	__tlbi_level(vale1is, addr, 3);
+	__tlbi_user_level(vale1is, addr, 3);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -257,11 +263,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ishst);
 	for (addr = start; addr < end; addr += stride) {
 		if (last_level) {
-			__tlbi(vale1is, addr);
-			__tlbi_user(vale1is, addr);
+			__tlbi_level(vale1is, addr, 0);
+			__tlbi_user_level(vale1is, addr, 0);
 		} else {
-			__tlbi(vae1is, addr);
-			__tlbi_user(vae1is, addr);
+			__tlbi_level(vae1is, addr, 0);
+			__tlbi_user_level(vae1is, addr, 0);
 		}
 	}
 	dsb(ish);
From: "Peter Zijlstra (Intel)" peterz@infradead.org
mainline inclusion from mainline-v5.9-rc1 commit 2631ed00b049 category: feature bugzilla: NA CVE: NA
-------------------------------------------------
tlb_flush_{pte|pmd|pud|p4d}_range() adjust tlb->start and tlb->end, then set the corresponding cleared_* flag.
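Conceptually, each helper is shorthand for the two steps callers previously open-coded; for instance (illustrative expansion, not literal macro output):

	/* tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE) is equivalent to: */
	__tlb_adjust_range(tlb, addr, HPAGE_PMD_SIZE);	/* widen tlb->start/end */
	tlb->cleared_pmds = 1;				/* record the cleared level */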
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20200625080314.230-5-yezhenyu2@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/asm-generic/tlb.h | 55 ++++++++++++++++++++++++++++-----------
 1 file changed, 40 insertions(+), 15 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 6be86c1c5c58..147381aad7cc 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -250,6 +250,38 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
 #define tlb_end_vma __tlb_end_vma
 #endif
 
+/*
+ * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
+ * and set corresponding cleared_*.
+ */
+static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
+				     unsigned long address, unsigned long size)
+{
+	__tlb_adjust_range(tlb, address, size);
+	tlb->cleared_ptes = 1;
+}
+
+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+				     unsigned long address, unsigned long size)
+{
+	__tlb_adjust_range(tlb, address, size);
+	tlb->cleared_pmds = 1;
+}
+
+static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
+				     unsigned long address, unsigned long size)
+{
+	__tlb_adjust_range(tlb, address, size);
+	tlb->cleared_puds = 1;
+}
+
+static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
+				     unsigned long address, unsigned long size)
+{
+	__tlb_adjust_range(tlb, address, size);
+	tlb->cleared_p4ds = 1;
+}
+
 #ifndef __tlb_remove_tlb_entry
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 #endif
@@ -263,19 +295,17 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
  */
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
-		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
-		tlb->cleared_ptes = 1;				\
+		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
 	do {							\
 		unsigned long _sz = huge_page_size(h);		\
-		__tlb_adjust_range(tlb, address, _sz);		\
 		if (_sz == PMD_SIZE)				\
-			tlb->cleared_pmds = 1;			\
+			tlb_flush_pmd_range(tlb, address, _sz);	\
 		else if (_sz == PUD_SIZE)			\
-			tlb->cleared_puds = 1;			\
+			tlb_flush_pud_range(tlb, address, _sz);	\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
@@ -289,8 +319,7 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
 
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
 	do {								\
-		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
-		tlb->cleared_pmds = 1;					\
+		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
 		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
 	} while (0)
 
@@ -304,8 +333,7 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
 
 #define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
 	do {								\
-		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
-		tlb->cleared_puds = 1;					\
+		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
 		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
 	} while (0)
 
@@ -330,9 +358,8 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
 #ifndef pte_free_tlb
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
 		tlb->freed_tables = 1;				\
-		tlb->cleared_pmds = 1;				\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 #endif
@@ -340,9 +367,8 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
 #ifndef pmd_free_tlb
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
 		tlb->freed_tables = 1;				\
-		tlb->cleared_puds = 1;				\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
 #endif
@@ -351,9 +377,8 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
 #ifndef pud_free_tlb
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
 		tlb->freed_tables = 1;				\
-		tlb->cleared_p4ds = 1;				\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
From: Zhenyu Ye <yezhenyu2@huawei.com>

mainline inclusion
from mainline-v5.9-rc1
commit c4ab2cbc1d87
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------------
This patch uses the cleared_* in struct mmu_gather to set the TTL field in flush_tlb_range().
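An illustrative example of the resulting behaviour (the scenario is assumed, not taken from the patch):

	/*
	 * Example: tearing down a range backed only by small pages leaves
	 * tlb->cleared_ptes == 1 with cleared_pmds/puds/p4ds == 0, so
	 * tlb_get_level() returns 3 and __flush_tlb_range() issues
	 * level-3-hinted vale1is/vae1is operations. If both PTEs and PMDs
	 * were cleared (mixed small and huge pages), the level is ambiguous
	 * and 0 (no hint) is used instead.
	 */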
Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20200625080314.230-6-yezhenyu2@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/tlb.h      | 29 ++++++++++++++++++++++++++++-
 arch/arm64/include/asm/tlbflush.h | 14 ++++++++------
 2 files changed, 36 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 106fdc951b6e..eb49cf9c36f4 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -31,11 +31,37 @@ static void tlb_flush(struct mmu_gather *tlb);
 
 #include <asm-generic/tlb.h>
 
+/*
+ * get the tlbi levels in arm64.  Default value is 0 if more than one
+ * of cleared_* is set or neither is set.
+ * Arm64 doesn't support p4ds now.
+ */
+static inline int tlb_get_level(struct mmu_gather *tlb)
+{
+	if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
+				   tlb->cleared_puds ||
+				   tlb->cleared_p4ds))
+		return 3;
+
+	if (tlb->cleared_pmds && !(tlb->cleared_ptes ||
+				   tlb->cleared_puds ||
+				   tlb->cleared_p4ds))
+		return 2;
+
+	if (tlb->cleared_puds && !(tlb->cleared_ptes ||
+				   tlb->cleared_pmds ||
+				   tlb->cleared_p4ds))
+		return 1;
+
+	return 0;
+}
+
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
 	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
 	bool last_level = !tlb->freed_tables;
 	unsigned long stride = tlb_get_unmap_size(tlb);
+	int tlb_level = tlb_get_level(tlb);
 
 	/*
 	 * If we're tearing down the address space then we only care about
@@ -48,7 +74,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 		return;
 	}
 
-	__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
+	__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
+			  last_level, tlb_level);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 25d8d33bba5e..6e07e5d753b6 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -241,7 +241,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
-				     unsigned long stride, bool last_level)
+				     unsigned long stride, bool last_level,
+				     int tlb_level)
 {
 	unsigned long asid = ASID(vma->vm_mm);
 	unsigned long addr;
@@ -263,11 +264,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ishst);
 	for (addr = start; addr < end; addr += stride) {
 		if (last_level) {
-			__tlbi_level(vale1is, addr, 0);
-			__tlbi_user_level(vale1is, addr, 0);
+			__tlbi_level(vale1is, addr, tlb_level);
+			__tlbi_user_level(vale1is, addr, tlb_level);
 		} else {
-			__tlbi_level(vae1is, addr, 0);
-			__tlbi_user_level(vae1is, addr, 0);
+			__tlbi_level(vae1is, addr, tlb_level);
+			__tlbi_user_level(vae1is, addr, tlb_level);
 		}
 	}
 	dsb(ish);
@@ -279,8 +280,9 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	/*
 	 * We cannot use leaf-only invalidation here, since we may be invalidating
 	 * table entries as part of collapsing hugepages or moving page tables.
+	 * Set the tlb_level to 0 because we can not get enough information here.
 	 */
-	__flush_tlb_range(vma, start, end, PAGE_SIZE, false);
+	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
 }
 
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
From: Zhenyu Ye <yezhenyu2@huawei.com>

mainline inclusion
from mainline-v5.9-rc1
commit a7ac1cfa4c05
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------------
This patch implements flush_{pmd|pud}_tlb_range() in arm64 by calling __flush_tlb_range() with the corresponding stride and tlb_level values.
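For context, the generic users of these macros live outside arm64 (for example, the THP helpers in mm/pgtable-generic.c call flush_pmd_tlb_range()); the expansion below is illustrative:

	/* A generic caller such as pmdp_huge_clear_flush() does roughly: */
	flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	/* which, with this patch on arm64, becomes: */
	__flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE, PMD_SIZE, false, 2);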
Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Link: https://lore.kernel.org/r/20200625080314.230-7-yezhenyu2@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/pgtable.h | 10 ++++++++++
 1 file changed, 10 insertions(+)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 6341ca2b4472..0662acc42d57 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -51,6 +51,16 @@ extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+
+/* Set stride and tlb_level in flush_*_tlb_range */
+#define flush_pmd_tlb_range(vma, addr, end)	\
+	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
+#define flush_pud_tlb_range(vma, addr, end)	\
+	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
From: Catalin Marinas <catalin.marinas@arm.com>

mainline inclusion
from mainline-v5.9-rc1
commit 34e36d81a0ef
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------------
This is for consistency with the other __tlbi macros in this file. No functional change.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/tlbflush.h | 43 +++++++++++++++----------------
 1 file changed, 21 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 6e07e5d753b6..4b76c6ea9d1b 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -88,32 +88,31 @@
 #define TLBI_TTL_TG_16K		2
 #define TLBI_TTL_TG_64K		3
 
-#define __tlbi_level(op, addr, level)					\
-	do {								\
-		u64 arg = addr;						\
+#define __tlbi_level(op, addr, level) do {				\
+	u64 arg = addr;							\
 									\
-		if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&	\
-		    level) {						\
-			u64 ttl = level & 3;				\
+	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&		\
+	    level) {							\
+		u64 ttl = level & 3;					\
 									\
-			switch (PAGE_SIZE) {				\
-			case SZ_4K:					\
-				ttl |= TLBI_TTL_TG_4K << 2;		\
-				break;					\
-			case SZ_16K:					\
-				ttl |= TLBI_TTL_TG_16K << 2;		\
-				break;					\
-			case SZ_64K:					\
-				ttl |= TLBI_TTL_TG_64K << 2;		\
-				break;					\
-			}						\
+		switch (PAGE_SIZE) {					\
+		case SZ_4K:						\
+			ttl |= TLBI_TTL_TG_4K << 2;			\
+			break;						\
+		case SZ_16K:						\
+			ttl |= TLBI_TTL_TG_16K << 2;			\
+			break;						\
+		case SZ_64K:						\
+			ttl |= TLBI_TTL_TG_64K << 2;			\
+			break;						\
+		}							\
 									\
-			arg &= ~TLBI_TTL_MASK;				\
-			arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);		\
-		}							\
+		arg &= ~TLBI_TTL_MASK;					\
+		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
+	}								\
 									\
-		__tlbi(op, arg);					\
-	} while(0)
+	__tlbi(op, arg);						\
+} while(0)
 
 #define __tlbi_user_level(op, arg, level) do {				\
 	if (arm64_kernel_unmapped_at_el0())				\
From: Zhenyu Ye <yezhenyu2@huawei.com>

mainline inclusion
from mainline-v5.9-rc1
commit 61c11656b67b
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------------
flush_tlb_page_nosync() may be called from pmd level, so we cannot set ttl = 3 here.
The callstack is as follows:
  pmdp_set_access_flags
    ptep_set_access_flags
      flush_tlb_fix_spurious_fault
        flush_tlb_page
          flush_tlb_page_nosync
Fixes: e735b98a5fe0 ("arm64: Add tlbi_user_level TLB invalidation helper")
Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Link: https://lore.kernel.org/r/20200710094158.468-1-yezhenyu2@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/tlbflush.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 4b76c6ea9d1b..50ba4d6036b5 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -220,9 +220,8 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
 	dsb(ishst);
-	/* This function is only called on a small page */
-	__tlbi_level(vale1is, addr, 3);
-	__tlbi_user_level(vale1is, addr, 3);
+	__tlbi(vale1is, addr);
+	__tlbi_user(vale1is, addr);
 }
 
static inline void flush_tlb_page(struct vm_area_struct *vma,