Anshuman Khandual (1):
  arm64/cpufeature: Add remaining feature bits in ID_AA64ISAR0 register

Sami Tolvanen (1):
  arm64: use a common .arch preamble for inline assembly

Yuan Can (1):
  config: arm64: update defconfig

Zhenyu Ye (3):
  arm64: tlb: Detect the ARMv8.4 TLBI RANGE feature
  arm64: enable tlbi range instructions
  arm64: tlb: Use the TLBI RANGE feature in arm64
 arch/arm64/Kconfig                     |  18 +++
 arch/arm64/Makefile                    |  15 ++-
 arch/arm64/configs/euleros_defconfig   |   5 +
 arch/arm64/configs/hulk_defconfig      |   5 +
 arch/arm64/configs/openeuler_defconfig |   5 +
 arch/arm64/include/asm/compiler.h      |   6 +
 arch/arm64/include/asm/cpucaps.h       |   3 +-
 arch/arm64/include/asm/cpufeature.h    |   6 +
 arch/arm64/include/asm/sysreg.h        |   4 +
 arch/arm64/include/asm/tlbflush.h      | 160 ++++++++++++++++++++-----
 arch/arm64/kernel/cpufeature.c         |  11 ++
 11 files changed, 205 insertions(+), 33 deletions(-)
From: Anshuman Khandual <anshuman.khandual@arm.com>

mainline inclusion
from mainline-v5.8
commit 7cd51a5a84d115cd49c43e90b083ca60873874e5
category: feature
CVE: NA
-----------------------
Enable the TLB feature bits in the ID_AA64ISAR0 register, as per the ARM DDI 0487F.a specification.
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Link: https://lore.kernel.org/r/1589881254-10082-10-git-send-email-anshuman.khandu...
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Yuan Can <yuancan@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/sysreg.h | 1 +
 arch/arm64/kernel/cpufeature.c  | 1 +
 2 files changed, 2 insertions(+)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 2b53a427c06d..d6a1e27e8c69 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -532,6 +532,7 @@
 #endif

 /* id_aa64isar0 */
+#define ID_AA64ISAR0_TLB_SHIFT		56
 #define ID_AA64ISAR0_TS_SHIFT		52
 #define ID_AA64ISAR0_FHM_SHIFT		48
 #define ID_AA64ISAR0_DP_SHIFT		44
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 056c1d2f20c6..73da5619ddaa 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -130,6 +130,7 @@ static bool __system_matches_cap(unsigned int n);
  * sync with the documentation of the CPU feature register ABI.
  */
 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
From: Zhenyu Ye <yezhenyu2@huawei.com>

mainline inclusion
from mainline-v5.8
commit b620ba54547cd0f98e35c1be102eec2cc25fda5d
category: feature
CVE: NA
-----------------------
ARMv8.4-TLBI provides TLBI invalidation instructions that apply to a range of input addresses. This patch detects this feature.
Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Link: https://lore.kernel.org/r/20200715071945.897-2-yezhenyu2@huawei.com
[catalin.marinas@arm.com: some renaming for consistency]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Yuan Can <yuancan@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/cpucaps.h |  3 ++-
 arch/arm64/include/asm/sysreg.h  |  3 +++
 arch/arm64/kernel/cpufeature.c   | 10 ++++++++++
 3 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 5935aadc065f..d805e2369517 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -68,7 +68,8 @@
 #define ARM64_HAS_CNP				47
 #define ARM64_HAS_ARMv8_4_TTL			48
 #define ARM64_HAS_DCPODP			49
+#define ARM64_HAS_TLB_RANGE			50

-#define ARM64_NCAPS				50
+#define ARM64_NCAPS				51

 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d6a1e27e8c69..72eb8d63dee7 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -546,6 +546,9 @@
 #define ID_AA64ISAR0_SHA1_SHIFT		8
 #define ID_AA64ISAR0_AES_SHIFT		4

+#define ID_AA64ISAR0_TLB_RANGE_NI	0x0
+#define ID_AA64ISAR0_TLB_RANGE		0x2
+
 /* id_aa64isar1 */
 #define ID_AA64ISAR1_SB_SHIFT		36
 #define ID_AA64ISAR1_GPI_SHIFT		28
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 73da5619ddaa..eee610433539 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1574,6 +1574,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.min_field_value = 1,
 		.matches = has_cpuid_feature,
 	},
+	{
+		.desc = "TLB range maintenance instructions",
+		.capability = ARM64_HAS_TLB_RANGE,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64ISAR0_EL1,
+		.field_pos = ID_AA64ISAR0_TLB_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = ID_AA64ISAR0_TLB_RANGE,
+	},
 #ifdef CONFIG_ARM64_HW_AFDBM
 	{
 		/*
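For reference, the capability above resolves to the TLB field (bits [59:56]) of ID_AA64ISAR0_EL1. A minimal sketch of reading that field by hand follows — cpu_has_tlb_range() is a hypothetical helper shown only for illustration; the kernel itself goes through the cpufeature framework as in the diff:

	/* Illustration only: probe ID_AA64ISAR0_EL1.TLB directly. */
	static inline bool cpu_has_tlb_range(void)
	{
		u64 isar0;

		asm volatile("mrs %0, id_aa64isar0_el1" : "=r" (isar0));
		return ((isar0 >> ID_AA64ISAR0_TLB_SHIFT) & 0xf) >=
			ID_AA64ISAR0_TLB_RANGE;
	}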
From: Zhenyu Ye <yezhenyu2@huawei.com>

mainline inclusion
from mainline-v5.8
commit 7c78f67e9bd97478d56157c2ad53823668b5b822
category: feature
CVE: NA
-----------------------
The TLBI RANGE feature introduces new assembly instructions that are only supported by binutils >= 2.30. Add the necessary Kconfig logic to allow this to be enabled, and pass '-march=armv8.4-a' to KBUILD_CFLAGS.
Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Link: https://lore.kernel.org/r/20200715071945.897-3-yezhenyu2@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Yuan Can <yuancan@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/Kconfig  | 18 ++++++++++++++++++
 arch/arm64/Makefile |  5 +++++
 2 files changed, 23 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 545f9cc6f5e4..ce949ebe6792 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1304,6 +1304,24 @@ config AS_HAS_CFI_NEGATE_RA_STATE

 endmenu

+menu "ARMv8.4 architectural features"
+
+config AS_HAS_ARMV8_4
+	def_bool $(cc-option,-Wa$(comma)-march=armv8.4-a)
+
+config ARM64_TLB_RANGE
+	bool "Enable support for tlbi range feature"
+	default y
+	depends on AS_HAS_ARMV8_4
+	help
+	  ARMv8.4-TLBI provides TLBI invalidation instructions that apply
+	  to a range of input addresses.
+
+	  The feature introduces new assembly instructions, which are only
+	  supported by binutils >= 2.30.
+
+endmenu
+
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 75205ff038a0..3e4e5cc1636f 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -68,6 +68,11 @@ branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
 KBUILD_CFLAGS += $(branch-prot-flags-y)
 endif

+ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
+# make sure to pass the newest target architecture to -march.
+KBUILD_CFLAGS += -Wa,-march=armv8.4-a
+endif
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 CHECKFLAGS	+= -D__AARCH64EB__
From: Zhenyu Ye <yezhenyu2@huawei.com>

mainline inclusion
from mainline-v5.8
commit d1d3aa98b1d4826a19adfefb69b96142a0cac633
category: feature
CVE: NA
-----------------------
Add __TLBI_VADDR_RANGE macro and rewrite __flush_tlb_range().
When the CPU supports the TLBI RANGE feature, the minimum range granularity is decided by 'scale', so we cannot always flush all pages with a single instruction.
For example, when pages = 0xe81a, let's start 'scale' from the maximum and find the right 'num' for each 'scale':
1. scale = 3, we can flush no pages because the minimum range is
   2^(5*3 + 1) = 0x10000.
2. scale = 2, the minimum range is 2^(5*2 + 1) = 0x800, we can flush
   0xe800 pages this time, so num = 0xe800/0x800 - 1 = 0x1c. The
   remaining pages are 0x1a;
3. scale = 1, the minimum range is 2^(5*1 + 1) = 0x40, no page can
   be flushed.
4. scale = 0, we flush the remaining 0x1a pages, num = 0x1a/0x2 - 1
   = 0xc.
However, in most scenarios, pages = 1 when flush_tlb_range() is called, so starting from scale = 3 (or some other supposedly suitable value, such as scale = ilog2(pages)) would incur extra overhead. Instead, increase 'scale' from 0 to the maximum; the flush order is then exactly opposite to the example above.
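To make the ordering concrete, here is a minimal user-space sketch of the decomposition (illustration only: RANGE_PAGES/RANGE_NUM mirror the __TLBI_RANGE_PAGES/__TLBI_RANGE_NUM macros added below, and a stride of one page is assumed):

	#include <stdio.h>

	#define RANGE_PAGES(num, scale) \
		((unsigned long)((num) + 1) << (5 * (scale) + 1))
	#define RANGE_NUM(pages, scale) \
		((int)(((pages) >> (5 * (scale) + 1)) & 0x1f) - 1)

	int main(void)
	{
		unsigned long pages = 0xe81a;
		int scale = 0;

		while (pages > 0) {
			if (pages % 2 == 1) {	/* odd residue: one non-range TLBI */
				printf("flush 1 page (non-range)\n");
				pages--;
				continue;
			}
			int num = RANGE_NUM(pages, scale);
			if (num >= 0) {
				printf("scale=%d num=%#x -> %#lx pages\n",
				       scale, (unsigned)num,
				       RANGE_PAGES(num, scale));
				pages -= RANGE_PAGES(num, scale);
			}
			scale++;
		}
		/* Prints scale=0 num=0xc, then scale=2 num=0x1c: the range
		 * ops cover 0x1a pages first and 0xe800 last, the reverse
		 * of the worked example above.
		 */
		return 0;
	}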
Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Link: https://lore.kernel.org/r/20200715071945.897-4-yezhenyu2@huawei.com
[catalin.marinas@arm.com: removed unnecessary masks in __TLBI_VADDR_RANGE]
[catalin.marinas@arm.com: __TLB_RANGE_NUM subtracts 1]
[catalin.marinas@arm.com: minor adjustments to the comments]
[catalin.marinas@arm.com: introduce system_supports_tlb_range()]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: y00494749 <y00494749@notesmail.huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/cpufeature.h |   6 ++
 arch/arm64/include/asm/tlbflush.h   | 154 ++++++++++++++++++++------
 2 files changed, 131 insertions(+), 29 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 2eed935f0a02..e2637094ca6f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -604,6 +604,12 @@ static inline bool system_supports_cnp(void)
 		cpus_have_const_cap(ARM64_HAS_CNP);
 }

+static inline bool system_supports_tlb_range(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
+		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
+}
+
 #define ARM64_SSBD_UNKNOWN		-1
 #define ARM64_SSBD_FORCE_DISABLE	0
 #define ARM64_SSBD_KERNEL		1
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 50ba4d6036b5..a3e27a1cd260 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -71,6 +71,31 @@
 		__ta;						\
 	})

+/*
+ * Get translation granule of the system, which is decided by
+ * PAGE_SIZE.  Used by TTL.
+ *  - 4KB	: 1
+ *  - 16KB	: 2
+ *  - 64KB	: 3
+ */
+#define TLBI_TTL_TG_4K		1
+#define TLBI_TTL_TG_16K		2
+#define TLBI_TTL_TG_64K		3
+
+static inline unsigned long get_trans_granule(void)
+{
+	switch (PAGE_SIZE) {
+	case SZ_4K:
+		return TLBI_TTL_TG_4K;
+	case SZ_16K:
+		return TLBI_TTL_TG_16K;
+	case SZ_64K:
+		return TLBI_TTL_TG_64K;
+	default:
+		return 0;
+	}
+}
+
 /*
  * Level-based TLBI operations.
  *
@@ -84,9 +109,6 @@
  * in asm/stage2_pgtable.h.
  */
 #define TLBI_TTL_MASK		GENMASK_ULL(47, 44)
-#define TLBI_TTL_TG_4K		1
-#define TLBI_TTL_TG_16K		2
-#define TLBI_TTL_TG_64K		3

 #define __tlbi_level(op, addr, level) do {			\
 	u64 arg = addr;						\
@@ -94,19 +116,7 @@
 	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&	\
 	    level) {						\
 		u64 ttl = level & 3;				\
-								\
-		switch (PAGE_SIZE) {				\
-		case SZ_4K:					\
-			ttl |= TLBI_TTL_TG_4K << 2;		\
-			break;					\
-		case SZ_16K:					\
-			ttl |= TLBI_TTL_TG_16K << 2;		\
-			break;					\
-		case SZ_64K:					\
-			ttl |= TLBI_TTL_TG_64K << 2;		\
-			break;					\
-		}						\
-								\
+		ttl |= get_trans_granule() << 2;		\
 		arg &= ~TLBI_TTL_MASK;				\
 		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);		\
 	}							\
@@ -119,6 +129,44 @@
 		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
 } while (0)

+/*
+ * This macro creates a properly formatted VA operand for the TLB RANGE.
+ * The value bit assignments are:
+ *
+ * +----------+------+-------+-------+-------+----------------------+
+ * |   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
+ * +-----------------+-------+-------+-------+----------------------+
+ * |63      48|47  46|45   44|43   39|38   37|36                   0|
+ *
+ * The address range is determined by below formula:
+ * [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) * PAGESIZE)
+ *
+ */
+#define __TLBI_VADDR_RANGE(addr, asid, scale, num, ttl)		\
+	({							\
+		unsigned long __ta = (addr) >> PAGE_SHIFT;	\
+		__ta &= GENMASK_ULL(36, 0);			\
+		__ta |= (unsigned long)(ttl) << 37;		\
+		__ta |= (unsigned long)(num) << 39;		\
+		__ta |= (unsigned long)(scale) << 44;		\
+		__ta |= get_trans_granule() << 46;		\
+		__ta |= (unsigned long)(asid) << 48;		\
+		__ta;						\
+	})
+
+/* These macros are used by the TLBI RANGE feature. */
+#define __TLBI_RANGE_PAGES(num, scale)	\
+	((unsigned long)((num) + 1) << (5 * (scale) + 1))
+#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)
+
+/*
+ * Generate 'num' values from -1 to 30 with -1 rejected by the
+ * __flush_tlb_range() loop below.
+ */
+#define TLBI_RANGE_MASK			GENMASK_ULL(4, 0)
+#define __TLBI_RANGE_NUM(pages, scale)	\
+	((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
+
 /*
  * TLB Invalidation
  * ================
@@ -242,32 +290,80 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long stride, bool last_level,
 				     int tlb_level)
 {
+	int num = 0;
+	int scale = 0;
 	unsigned long asid = ASID(vma->vm_mm);
 	unsigned long addr;
+	unsigned long pages;

 	start = round_down(start, stride);
 	end = round_up(end, stride);
+	pages = (end - start) >> PAGE_SHIFT;

-	if ((end - start) >= (MAX_TLBI_OPS * stride)) {
+	/*
+	 * When not using TLB range ops, we can handle up to
+	 * (MAX_TLBI_OPS - 1) pages;
+	 * when using TLB range ops, we can handle up to
+	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
+	 */
+	if ((!system_supports_tlb_range() &&
+	     (end - start) >= (MAX_TLBI_OPS * stride)) ||
+	    pages >= MAX_TLBI_RANGE_PAGES) {
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}

-	/* Convert the stride into units of 4k */
-	stride >>= 12;
+	dsb(ishst);

-	start = __TLBI_VADDR(start, asid);
-	end = __TLBI_VADDR(end, asid);
+	/*
+	 * When the CPU does not support TLB range operations, flush the TLB
+	 * entries one by one at the granularity of 'stride'. If the TLB
+	 * range ops are supported, then:
+	 *
+	 * 1. If 'pages' is odd, flush the first page through non-range
+	 *    operations;
+	 *
+	 * 2. For remaining pages: the minimum range granularity is decided
+	 *    by 'scale', so multiple range TLBI operations may be required.
+	 *    Start from scale = 0, flush the corresponding number of pages
+	 *    ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
+	 *    until no pages left.
+	 *
+	 * Note that certain ranges can be represented by either num = 31 and
+	 * scale or num = 0 and scale + 1. The loop below favours the latter
+	 * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
+	 */
+	while (pages > 0) {
+		if (!system_supports_tlb_range() ||
+		    pages % 2 == 1) {
+			addr = __TLBI_VADDR(start, asid);
+			if (last_level) {
+				__tlbi_level(vale1is, addr, tlb_level);
+				__tlbi_user_level(vale1is, addr, tlb_level);
+			} else {
+				__tlbi_level(vae1is, addr, tlb_level);
+				__tlbi_user_level(vae1is, addr, tlb_level);
+			}
+			start += stride;
+			pages -= stride >> PAGE_SHIFT;
+			continue;
+		}

-	dsb(ishst);
-	for (addr = start; addr < end; addr += stride) {
-		if (last_level) {
-			__tlbi_level(vale1is, addr, tlb_level);
-			__tlbi_user_level(vale1is, addr, tlb_level);
-		} else {
-			__tlbi_level(vae1is, addr, tlb_level);
-			__tlbi_user_level(vae1is, addr, tlb_level);
+		num = __TLBI_RANGE_NUM(pages, scale);
+		if (num >= 0) {
+			addr = __TLBI_VADDR_RANGE(start, asid, scale,
+						  num, tlb_level);
+			if (last_level) {
+				__tlbi(rvale1is, addr);
+				__tlbi_user(rvale1is, addr);
+			} else {
+				__tlbi(rvae1is, addr);
+				__tlbi_user(rvae1is, addr);
+			}
+			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
+			pages -= __TLBI_RANGE_PAGES(num, scale);
 		}
+		scale++;
 	}
 	dsb(ish);
 }
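As a cross-check of the operand layout, the encoding can be reproduced host-side (illustration only: tlbi_vaddr_range() is a hypothetical mirror of __TLBI_VADDR_RANGE, assuming a 64-bit unsigned long and a 4KB granule, i.e. TG = 1):

	#include <stdio.h>

	static unsigned long tlbi_vaddr_range(unsigned long addr,
					      unsigned long asid,
					      unsigned long scale,
					      unsigned long num,
					      unsigned long ttl)
	{
		unsigned long ta = (addr >> 12) & ((1UL << 37) - 1); /* BADDR[36:0] */

		ta |= ttl << 37;	/* TTL[38:37]   */
		ta |= num << 39;	/* NUM[43:39]   */
		ta |= scale << 44;	/* SCALE[45:44] */
		ta |= 1UL << 46;	/* TG[47:46], 1 = 4KB granule */
		ta |= asid << 48;	/* ASID[63:48]  */
		return ta;
	}

	int main(void)
	{
		/* scale = 2, num = 0x1c spans (0x1c + 1) * 2^(5*2+1) = 0xe800 pages */
		printf("%#lx\n", tlbi_vaddr_range(0x400000, 0x42, 2, 0x1c, 0));
		return 0;
	}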
From: Sami Tolvanen <samitolvanen@google.com>

mainline inclusion
from mainline-v5.8
commit 1764c3edc66880778604f5053fe2dda7b3ddd2c1
category: bugfix
CVE: NA
-----------------------
Commit 7c78f67e9bd9 ("arm64: enable tlbi range instructions") breaks LLVM's integrated assembler, because -Wa,-march is only passed to external assemblers, and therefore the new instructions are not enabled when IAS is used.
This change adds a common architecture version preamble, which can be used in inline assembly blocks that contain instructions that require a newer architecture version, and uses it to fix __TLBI_0 and __TLBI_1 with ARM64_TLB_RANGE.
Fixes: 7c78f67e9bd9 ("arm64: enable tlbi range instructions")
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Tested-by: Nathan Chancellor <natechancellor@gmail.com>
Reviewed-by: Nathan Chancellor <natechancellor@gmail.com>
Link: https://github.com/ClangBuiltLinux/linux/issues/1106
Link: https://lore.kernel.org/r/20200827203608.1225689-1-samitolvanen@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Yuan Can <yuancan@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/Makefile               | 14 +++++++++++---
 arch/arm64/include/asm/compiler.h |  6 ++++++
 arch/arm64/include/asm/tlbflush.h |  6 ++++--
 3 files changed, 21 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 3e4e5cc1636f..1d754357e842 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -64,13 +64,21 @@ branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pa
 # compiler to generate them and consequently to break the single image contract
 # we pass it only to the assembler. This option is utilized only in case of non
 # integrated assemblers.
-branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
-KBUILD_CFLAGS += $(branch-prot-flags-y)
+ifeq ($(CONFIG_AS_HAS_PAC), y)
+asm-arch := armv8.3-a
+endif
 endif

+KBUILD_CFLAGS += $(branch-prot-flags-y)
+
 ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
 # make sure to pass the newest target architecture to -march.
-KBUILD_CFLAGS += -Wa,-march=armv8.4-a
+asm-arch := armv8.4-a
+endif
+
+ifdef asm-arch
+KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
+		   -DARM64_ASM_ARCH='"$(asm-arch)"'
 endif

 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index b9c630287012..fc0fd6d4c0f3 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -18,6 +18,12 @@
 #ifndef __ASM_COMPILER_H
 #define __ASM_COMPILER_H

+#ifdef ARM64_ASM_ARCH
+#define ARM64_ASM_PREAMBLE	".arch " ARM64_ASM_ARCH "\n"
+#else
+#define ARM64_ASM_PREAMBLE
+#endif
+
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences. Apparently we can't trust the
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index a3e27a1cd260..2e4a9046ead7 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -39,14 +39,16 @@
  * not. The macros handles invoking the asm with or without the
  * register argument as appropriate.
  */
-#define __TLBI_0(op, arg)	asm ("tlbi " #op "\n"		\
+#define __TLBI_0(op, arg)	asm (ARM64_ASM_PREAMBLE		\
+				     "tlbi " #op "\n"		\
		   ALTERNATIVE("nop\n			nop",	\
			       "dsb ish\n		tlbi " #op,	\
			       ARM64_WORKAROUND_REPEAT_TLBI,	\
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)	\
			    : : )

-#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0\n"	\
+#define __TLBI_1(op, arg)	asm (ARM64_ASM_PREAMBLE		\
+				     "tlbi " #op ", %0\n"	\
		   ALTERNATIVE("nop\n			nop",	\
			       "dsb ish\n		tlbi " #op ", %0",	\
			       ARM64_WORKAROUND_REPEAT_TLBI,	\
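For illustration (assuming CONFIG_AS_HAS_ARMV8_4=y, so the Makefile change above defines ARM64_ASM_ARCH as "armv8.4-a"), a sketch of what __TLBI_1 effectively expands to — tlbi_rvae1is() is a hypothetical wrapper and the ALTERNATIVE() workaround is omitted:

	static inline void tlbi_rvae1is(unsigned long arg)
	{
		/*
		 * ARM64_ASM_PREAMBLE expands to ".arch armv8.4-a\n". The
		 * .arch directive scopes the newer ISA to this one asm
		 * block, so the integrated assembler (which is never
		 * passed -Wa,-march) accepts the v8.4 range instruction.
		 */
		asm (".arch armv8.4-a\n"
		     "tlbi rvae1is, %0\n"
		     : : "r" (arg) : "memory");
	}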
From: Yuan Can <yuancan@huawei.com>

hulk inclusion
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------------
Add a default value for the TLB_RANGE feature to hulk_defconfig, openeuler_defconfig and euleros_defconfig.
Signed-off-by: Yuan Can <yuancan@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/configs/euleros_defconfig   | 5 +++++
 arch/arm64/configs/hulk_defconfig      | 5 +++++
 arch/arm64/configs/openeuler_defconfig | 5 +++++
 3 files changed, 15 insertions(+)
diff --git a/arch/arm64/configs/euleros_defconfig b/arch/arm64/configs/euleros_defconfig
index 2bc096801d3a..db84fb568dfd 100644
--- a/arch/arm64/configs/euleros_defconfig
+++ b/arch/arm64/configs/euleros_defconfig
@@ -475,6 +475,11 @@ CONFIG_RANDOMIZE_BASE=y
 CONFIG_RANDOMIZE_MODULE_REGION_FULL=y
 CONFIG_ARM64_CNP=n

+#
+# ARMv8.4 architectural features
+#
+# CONFIG_ARM64_TLB_RANGE is not set
+
 #
 # Boot options
 #
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig
index 92b8b1e361fa..3c553d50ebe5 100644
--- a/arch/arm64/configs/hulk_defconfig
+++ b/arch/arm64/configs/hulk_defconfig
@@ -489,6 +489,11 @@ CONFIG_ARM64_PTR_AUTH=y
 CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y
 CONFIG_AS_HAS_PAC=y

+#
+# ARMv8.4 architectural features
+#
+CONFIG_ARM64_TLB_RANGE=y
+
 #
 # Boot options
 #
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 51600dafff85..882e45bbea45 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -485,6 +485,11 @@ CONFIG_ARM64_PTR_AUTH=y
 CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y
 CONFIG_AS_HAS_PAC=y

+#
+# ARMv8.4 architectural features
+#
+CONFIG_ARM64_TLB_RANGE=y
+
 #
 # Boot options
 #