
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IBV2E4

--------------------------------

Support hardware Xcall and Xint.

Hardware xint provides a separate entry for interrupt handling, so we
can use it to customize and respond to interrupts relatively quickly.

Hardware xcall provides a separate entry for EL0 syscall handling, so
we can use it to customize and respond to system calls relatively
quickly.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/arm64/include/asm/actlr.h   | 15 ++++++
 arch/arm64/include/asm/vectors.h |  1 +
 arch/arm64/kernel/cpufeature.c   | 81 ++++++++++++++++++++++++++++++++
 arch/arm64/kernel/entry-common.c | 15 +++++-
 arch/arm64/kernel/entry.S        | 52 +++++++++++++++++++-
 arch/arm64/tools/cpucaps         |  2 +-
 6 files changed, 161 insertions(+), 5 deletions(-)
 create mode 100644 arch/arm64/include/asm/actlr.h

diff --git a/arch/arm64/include/asm/actlr.h b/arch/arm64/include/asm/actlr.h
new file mode 100644
index 000000000000..3a44b6876e81
--- /dev/null
+++ b/arch/arm64/include/asm/actlr.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 - Huawei Ltd.
+ */
+
+#ifndef __ASM_ACTLR_H
+#define __ASM_ACTLR_H
+
+#define ACTLR_ELx_XCALL_SHIFT	20
+#define ACTLR_ELx_XCALL		(UL(1) << ACTLR_ELx_XCALL_SHIFT)
+
+#define ACTLR_ELx_XINT_SHIFT	(21)
+#define ACTLR_ELx_XINT		(UL(1) << ACTLR_ELx_XINT_SHIFT)
+
+#endif /* __ASM_ACTLR_H */
diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h
index bc9a2145f419..0addb4bd2b3d 100644
--- a/arch/arm64/include/asm/vectors.h
+++ b/arch/arm64/include/asm/vectors.h
@@ -11,6 +11,7 @@
 #include <asm/fixmap.h>
 
 extern char vectors[];
+extern char vectors_xcall_xint[];
 extern char tramp_vectors[];
 extern char __bp_harden_el1_vectors[];
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 5c3d206785bd..b13858668877 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -76,6 +76,7 @@
 #include <linux/kasan.h>
 #include <linux/percpu.h>
 
+#include <asm/actlr.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
@@ -2466,6 +2467,77 @@ static bool has_xint_support(const struct arm64_cpu_capabilities *entry, int __u
 }
 #endif
 
+#ifdef CONFIG_ACTLR_XCALL_XINT
+static bool has_arch_xcall_xint_support(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	/* List of CPUs that support Xcall/Xint */
+	static const struct midr_range xcall_xint_cpus[] = {
+		MIDR_ALL_VERSIONS(MIDR_HISI_HIP12),
+		{ /* sentinel */ }
+	};
+
+	return is_midr_in_range_list(read_cpuid_id(), xcall_xint_cpus);
+}
+
+static void enable_xcall_xint_vectors(void)
+{
+	/*
+	 * When CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY is enabled,
+	 * vbar_el1 is set to the vectors starting from __bp_harden_el1_vectors.
+	 * The kernel will jump to the xcall/xint vectors from the trampoline
+	 * vector defined in the macro tramp_ventry.
+	 */
+	if (__this_cpu_read(this_cpu_vector) != vectors)
+		return;
+
+	/*
+	 * When KAISER is enabled, vbar_el1 is set to the vectors starting
+	 * from tramp_vectors. The kernel will jump to vectors_xcall_xint from
+	 * the trampoline vector defined in the macro tramp_ventry.
+	 */
+	if (arm64_kernel_unmapped_at_el0())
+		return;
+
+	/*
+	 * If neither KAISER nor BHB_MITIGATION is enabled, switch
+	 * vbar_el1 from the default vectors to the xcall/xint vectors
+	 * at once.
+	 */
+	write_sysreg(vectors_xcall_xint, vbar_el1);
+	isb();
+}
+
+static void cpu_enable_arch_xcall_xint(const struct arm64_cpu_capabilities *__unused)
+{
+	int cpu = smp_processor_id();
+	u64 actlr_el1, actlr_el2;
+	u64 el;
+
+	el = read_sysreg(CurrentEL);
+	if (el == CurrentEL_EL2) {
+		/*
+		 * Trap guest kernel accesses to ACTLR_EL1 at EL2.
+		 */
+		write_sysreg_s(read_sysreg_s(SYS_HCR_EL2) | HCR_TACR, SYS_HCR_EL2);
+		actlr_el2 = read_sysreg(actlr_el2);
+		actlr_el2 |= (ACTLR_ELx_XINT | ACTLR_ELx_XCALL);
+		write_sysreg(actlr_el2, actlr_el2);
+		isb();
+		actlr_el2 = read_sysreg(actlr_el2);
+		pr_info("actlr_el2: %llx, cpu:%d\n", actlr_el2, cpu);
+	} else {
+		actlr_el1 = read_sysreg(actlr_el1);
+		actlr_el1 |= (ACTLR_ELx_XINT | ACTLR_ELx_XCALL);
+		write_sysreg(actlr_el1, actlr_el1);
+		isb();
+		actlr_el1 = read_sysreg(actlr_el1);
+		pr_info("actlr_el1: %llx, cpu:%d\n", actlr_el1, cpu);
+	}
+
+	enable_xcall_xint_vectors();
+}
+#endif
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.capability = ARM64_ALWAYS_BOOT,
@@ -3031,6 +3103,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_ls64_v,
 		ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64_V)
 	},
+#endif
+#ifdef CONFIG_ACTLR_XCALL_XINT
+	{
+		.desc = "Hardware Xcall and Xint Support",
+		.capability = ARM64_HAS_HW_XCALL_XINT,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_arch_xcall_xint_support,
+		.cpu_enable = cpu_enable_arch_xcall_xint,
+	},
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index b7cc01130ff5..a90231346751 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -609,7 +609,7 @@ static void noinstr el0_xint(struct pt_regs *regs, u64 nmi_flag,
 
 }
 
-asmlinkage void noinstr el0t_64_xint_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_64_fast_irq_handler(struct pt_regs *regs)
 {
 	el0_xint(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq);
 }
@@ -831,7 +831,7 @@ static void noinstr el0_xcall(struct pt_regs *regs)
 	fast_exit_to_user_mode(regs);
 }
 
-asmlinkage void noinstr el0t_64_xcall_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_64_fast_syscall_handler(struct pt_regs *regs)
 {
 	el0_xcall(regs);
 }
@@ -1051,6 +1051,17 @@ UNHANDLED(el0t, 32, fiq)
 UNHANDLED(el0t, 32, error)
 #endif /* CONFIG_AARCH32_EL0 */
 
+#ifdef CONFIG_ACTLR_XCALL_XINT
+asmlinkage void noinstr el0t_64_xcall_handler(struct pt_regs *regs)
+{
+	el0_svc(regs);
+}
+asmlinkage void noinstr el0t_64_xint_handler(struct pt_regs *regs)
+{
+	el0_interrupt(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq);
+}
+#endif
+
 #ifdef CONFIG_VMAP_STACK
 asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
 {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index da3809632f0f..801a521613ad 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -48,7 +48,13 @@
 	mrs	x30, tpidrro_el0
 	msr	tpidrro_el0, xzr
 	.else
+alternative_if ARM64_HAS_HW_XCALL_XINT
+	mrs	x30, tpidrro_el0
+	msr	tpidrro_el0, xzr
+alternative_else
 	mov	x30, xzr
+	nop
+alternative_endif
 	.endif
 .Lskip_tramp_vectors_cleanup\@:
 	.endif
@@ -570,6 +576,29 @@ SYM_CODE_START(vectors)
 	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
 SYM_CODE_END(vectors)
 
+	.align	11
+SYM_CODE_START(vectors_xcall_xint)
+	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
+	kernel_ventry	1, t, 64, irq		// IRQ EL1t
+	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
+	kernel_ventry	1, t, 64, error		// Error EL1t
+
+	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
+	kernel_ventry	1, h, 64, irq		// IRQ EL1h
+	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
+	kernel_ventry	1, h, 64, error		// Error EL1h
+
+	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
+	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
+	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
+	kernel_ventry	0, t, 64, error		// Error 64-bit EL0
+
+	kernel_ventry	0, t, 64, xcall		// XCALL synchronous 64-bit EL0
+	kernel_ventry	0, t, 64, xint		// XINT 64-bit EL0
+	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
+	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
+SYM_CODE_END(vectors_xcall_xint)
+
 #ifdef CONFIG_VMAP_STACK
 SYM_CODE_START_LOCAL(__bad_stack)
 	/*
@@ -648,7 +677,7 @@ SYM_CODE_END(__bad_stack)
 	kernel_entry 0, 64
 #endif
 	mov	x0, sp
-	bl	el0t_64_xcall_handler
+	bl	el0t_64_fast_syscall_handler
 #ifdef CONFIG_SECURITY_FEATURE_BYPASS
 	kernel_exit 0, xcall
 #else
@@ -696,7 +725,7 @@ SYM_CODE_END(__bad_stack)
 	kernel_entry 0, 64
 #endif
 	mov	x0, sp
-	bl	el0t_64_xint_handler
+	bl	el0t_64_fast_irq_handler
 #ifdef CONFIG_SECURITY_FEATURE_BYPASS
 	kernel_exit 0, xint
 #else
@@ -759,6 +788,10 @@ SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
 	entry_handler	0, t, 64, fiq
 	entry_handler	0, t, 64, error
 
+#ifdef CONFIG_ACTLR_XCALL_XINT
+	entry_handler	0, t, 64, xcall
+	entry_handler	0, t, 64, xint
+#endif
 	entry_handler	0, t, 32, sync
 	entry_handler	0, t, 32, irq
 	entry_handler	0, t, 32, fiq
@@ -849,6 +882,12 @@ alternative_else_nop_endif
 	msr	tpidrro_el0, x30	// Restored in kernel_ventry
 	.endif
 
+	.if	\regsize == 32
+alternative_if ARM64_HAS_HW_XCALL_XINT
+	msr	tpidrro_el0, x30	// Restored in kernel_ventry
+alternative_else_nop_endif
+	.endif
+
 	.if	\bhb == BHB_MITIGATION_LOOP
 	/*
 	 * This sequence must appear before the first indirect branch. i.e. the
@@ -873,7 +912,12 @@ alternative_else_nop_endif
 2:
 	tramp_map_kernel	x30
 alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+alternative_if_not ARM64_HAS_HW_XCALL_XINT
 	tramp_data_read_var	x30, vectors
+	b	3f
+alternative_else_nop_endif
+	tramp_data_read_var	x30, vectors_xcall_xint
+3:
 alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
 	prfm	plil1strm, [x30, #(1b - \vector_start)]
 alternative_else_nop_endif
@@ -881,7 +925,11 @@ alternative_else_nop_endif
 	msr	vbar_el1, x30
 	isb
 	.else
+alternative_if_not ARM64_HAS_HW_XCALL_XINT
 	adr_l	x30, vectors
+alternative_else
+	adr_l	x30, vectors_xcall_xint
+alternative_endif
 	.endif // \kpti == 1
 
 	.if	\bhb == BHB_MITIGATION_FW
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 27d93050e5da..ae0268822d61 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -112,7 +112,7 @@ HAS_XCALL
 HAS_XINT
 HAS_LS64
 HAS_LS64_V
-KABI_RESERVE_5
+HAS_HW_XCALL_XINT
 KABI_RESERVE_6
 KABI_RESERVE_7
 KABI_RESERVE_8
-- 
2.34.1
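
For reference, a minimal standalone C sketch of the bit arithmetic that the new
asm/actlr.h header encodes and that cpu_enable_arch_xcall_xint() applies when it
ORs the two feature bits into ACTLR_ELx. Only the shift values (bit 20 for xcall,
bit 21 for xint) come from the patch itself; the userspace main(), the local
actlr variable and the printf() are illustrative stand-ins for the kernel's
read_sysreg()/write_sysreg() sequence, not kernel code.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors arch/arm64/include/asm/actlr.h from the patch above. */
	#define ACTLR_ELx_XCALL_SHIFT	20
	#define ACTLR_ELx_XCALL		(1ULL << ACTLR_ELx_XCALL_SHIFT)
	#define ACTLR_ELx_XINT_SHIFT	21
	#define ACTLR_ELx_XINT		(1ULL << ACTLR_ELx_XINT_SHIFT)

	int main(void)
	{
		/* Illustrative stand-in for the ACTLR_ELx value read via read_sysreg(). */
		uint64_t actlr = 0;

		/* Same composition as cpu_enable_arch_xcall_xint(): set both bits. */
		actlr |= (ACTLR_ELx_XINT | ACTLR_ELx_XCALL);

		/* Prints 0x300000: bit 20 (xcall) and bit 21 (xint) are set. */
		printf("ACTLR_ELx enable mask: %#llx\n", (unsigned long long)actlr);
		return 0;
	}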