
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IBV2E4

--------------------------------

Support hardware xcall.

Hardware xcall provides a separate entry for EL0 syscall handling, so
we can use it to customize syscall handling and respond to system
calls relatively quickly.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
v2:
- Remove an excess semicolon.
- Fix a typo.
---
 arch/Kconfig                     | 14 ++++-----
 arch/arm64/Kconfig               |  5 ++++
 arch/arm64/include/asm/sysreg.h  |  3 ++
 arch/arm64/kernel/cpufeature.c   | 50 ++++++++++++++++++++++++++------
 arch/arm64/kernel/entry-common.c |  4 +--
 arch/arm64/kernel/entry.S        | 38 ++++++++++++++++++++++--
 arch/arm64/tools/cpucaps         |  2 +-
 7 files changed, 94 insertions(+), 22 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index a4ed5d338dad..dd7b91f19edc 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1574,12 +1574,12 @@ config FAST_IRQ
 	  framework for latency-sensitive interrupts.
 
 config DEBUG_FEATURE_BYPASS
-	bool "Bypass debug feature in fast syscall"
-	depends on FAST_SYSCALL || FAST_IRQ
+	bool "Bypass debug feature in fast syscall/irq and hardware xcall"
+	depends on FAST_SYSCALL || FAST_IRQ || ARCH_SUPPORTS_XCALL
 	depends on !LOCKDEP
 	default y
 	help
-	  This to bypass debug feature in fast syscall.
+	  This is to bypass debug features in fast syscall/irq and hardware xcall.
 	  The svc exception handling process, which includes auxiliary
 	  functions for debug/trace and core functions like KPTI, has
 	  been identified as overly "lengthy".
@@ -1587,12 +1587,12 @@
 	  Disable this config to keep debug feature in fast syscall.
 
 config SECURITY_FEATURE_BYPASS
-	bool "Bypass security feature in fast syscall"
-	depends on FAST_SYSCALL || FAST_IRQ
+	bool "Bypass security feature in fast syscall/irq and hardware xcall"
+	depends on FAST_SYSCALL || FAST_IRQ || ARCH_SUPPORTS_XCALL
 	default y
 	help
-	  This to bypass security feature in fast syscall.
-	  The svc exception handling process, which includes auxiliary
+	  This is to bypass security features in fast syscall/irq and hardware
+	  xcall. The svc exception handling process, which includes auxiliary
 	  functions for debug/trace and core functions like KPTI, has
 	  been identified as overly "lengthy".
 	  In fast syscall we only considers necessary features.
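A note on the detection pattern used in the cpufeature.c hunk below:
the xcall enable bit in ACTLR_EL1 is IMPLEMENTATION DEFINED, so its
presence can only be probed by writing the bit and reading it back;
on CPUs that do not implement it, the bit does not stick and reads
back as zero. A minimal standalone sketch of that pattern, assuming
the kernel's standard sysreg accessors (the helper name here is
hypothetical, not part of this patch):

	#include <linux/types.h>
	#include <asm/barrier.h>
	#include <asm/sysreg.h>

	/*
	 * Probe an IMPLEMENTATION DEFINED ACTLR_EL1 bit by setting it
	 * and reading it back; restore the original value either way.
	 */
	static bool actlr_el1_bit_implemented(u64 bit)
	{
		u64 old = read_sysreg(actlr_el1);
		u64 new;

		write_sysreg(old | bit, actlr_el1);
		isb();			/* synchronize the sysreg write */
		new = read_sysreg(actlr_el1);
		write_sysreg(old, actlr_el1);

		return !!(new & bit);	/* the bit sticks only if implemented */
	}

test_has_xfunc() below applies this probe to ACTLR_ELx_XINT or
ACTLR_ELx_XCALL depending on its is_xint argument.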
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 57d05cfbd29e..14b818fce7a1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1724,6 +1724,11 @@
 	depends on ARM_GIC_V3
 	depends on !COMPAT
 
+config ARCH_SUPPORTS_XCALL
+	bool "Hardware xcall support"
+	depends on !COMPAT
+	default n
+
 config TRANS_TABLE
 	def_bool y
 	depends on HIBERNATION || KEXEC_CORE
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b34e8cc4476b..f97b2b400266 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -270,6 +270,9 @@
 
 #define SYS_ACTLR_EL1			sys_reg(3, 0, 1, 0, 1)
 
+#define ACTLR_ELx_XCALL_SHIFT	20
+#define ACTLR_ELx_XCALL		(BIT(ACTLR_ELx_XCALL_SHIFT))
+
 #define ACTLR_ELx_XINT_SHIFT	21
 #define ACTLR_ELx_XINT		(BIT(ACTLR_ELx_XINT_SHIFT))
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b4de05241f7d..055eb16309e2 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2428,24 +2428,33 @@ static bool has_xint_support(const struct arm64_cpu_capabilities *entry, int __u
 }
 #endif
 
-#ifdef CONFIG_ARCH_SUPPORTS_XINT
-static bool test_has_xfunc(void)
+#if defined(CONFIG_ARCH_SUPPORTS_XINT) || defined(CONFIG_ARCH_SUPPORTS_XCALL)
+static bool test_has_xfunc(bool is_xint)
 {
 	u64 new, old = read_sysreg(actlr_el1);
 
-	write_sysreg(old | ACTLR_ELx_XINT, actlr_el1);
+	if (is_xint)
+		write_sysreg(old | ACTLR_ELx_XINT, actlr_el1);
+	else
+		write_sysreg(old | ACTLR_ELx_XCALL, actlr_el1);
+
 	isb();
 	new = read_sysreg(actlr_el1);
-	if (new & ACTLR_ELx_XINT) {
+	if (is_xint && (new & ACTLR_ELx_XINT)) {
 		write_sysreg(old, actlr_el1);
 		hw_xint_support = true;
 		return true;
 	}
 
+	if (!is_xint && (new & ACTLR_ELx_XCALL)) {
+		write_sysreg(old, actlr_el1);
+		return true;
+	}
+
 	return false;
 }
 
-static void enable_xfunc(void)
+static void enable_xfunc(bool is_xint)
 {
 	u64 actlr_el1, actlr_el2;
 	u64 el;
@@ -2453,7 +2462,7 @@ static void enable_xfunc(void)
 	el = read_sysreg(CurrentEL);
 	if (el == CurrentEL_EL2) {
 		actlr_el2 = read_sysreg(actlr_el2);
-		actlr_el2 |= ACTLR_ELx_XINT;
+		actlr_el2 |= (is_xint ? ACTLR_ELx_XINT : ACTLR_ELx_XCALL);
 		write_sysreg(actlr_el2, actlr_el2);
 		isb();
 		actlr_el2 = read_sysreg(actlr_el2);
@@ -2461,13 +2470,15 @@
 	}
 
 	actlr_el1 = read_sysreg(actlr_el1);
-	actlr_el1 |= ACTLR_ELx_XINT;
+	actlr_el1 |= (is_xint ? ACTLR_ELx_XINT : ACTLR_ELx_XCALL);
 	write_sysreg(actlr_el1, actlr_el1);
 	isb();
 	actlr_el1 = read_sysreg(actlr_el1);
 	pr_info("actlr_el1: %llx, cpu:%d\n", actlr_el1, smp_processor_id());
 }
+#endif
 
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
 static bool test_has_xint(const struct arm64_cpu_capabilities *entry, int scope)
 {
 	if (!IS_ENABLED(CONFIG_ARM64_NMI))
@@ -2479,12 +2490,24 @@ static bool test_has_xint(const struct arm64_cpu_capabilities *entry, int scope)
 	}
 #endif
 
-	return test_has_xfunc();
+	return test_has_xfunc(true);
 }
 
 static void xint_enable(const struct arm64_cpu_capabilities *__unused)
 {
-	enable_xfunc();
+	enable_xfunc(true);
+}
+#endif
+
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+static bool test_has_xcall(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	return test_has_xfunc(false);
+}
+
+static void xcall_enable(const struct arm64_cpu_capabilities *__unused)
+{
+	enable_xfunc(false);
 }
 #endif
 
@@ -3044,6 +3067,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = test_has_xint,
 		.cpu_enable = xint_enable,
 	},
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	{
+		.desc = "Hardware xcall Support",
+		.capability = ARM64_HAS_HW_XCALL,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = test_has_xcall,
+		.cpu_enable = xcall_enable,
+	},
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index d09029dfcf02..ea78eb729ff6 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -154,7 +154,7 @@ asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
 	exit_to_user_mode(regs);
 }
 
-#if defined(CONFIG_FAST_SYSCALL) || defined(CONFIG_FAST_IRQ)
+#if defined(CONFIG_FAST_SYSCALL) || defined(CONFIG_FAST_IRQ) || defined(CONFIG_ARCH_SUPPORTS_XCALL)
 /*
  * Copy from exit_to_user_mode_prepare
  */
@@ -818,7 +818,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
 	exit_to_user_mode(regs);
 }
 
-#ifdef CONFIG_FAST_SYSCALL
+#if defined(CONFIG_FAST_SYSCALL) || defined(CONFIG_ARCH_SUPPORTS_XCALL)
 /* Copy from el0_sync */
 static void noinstr el0_xcall(struct pt_regs *regs)
 {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 046225fa2f90..23c1b255e02e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -564,7 +564,11 @@ SYM_CODE_START(vectors)
 	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
 	kernel_ventry	0, t, 64, error		// Error 64-bit EL0
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	kernel_ventry	0, t, 64, xcall		// xcall synchronous 64-bit EL0
+#else
 	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
+#endif
 #ifdef CONFIG_ARCH_SUPPORTS_XINT
 	kernel_ventry	0, t, 64, xint		// XINT 64-bit EL0
 #else
@@ -722,7 +726,19 @@ SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
 	b	.Lret_to_kernel_entry\@
 alternative_else_nop_endif
 	check_xcall_pre_kernel_entry
-	.Lret_to_kernel_entry\@:
+.Lret_to_kernel_entry\@:
 	.endif
 #endif
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	.if \el == 0 && \regsize == 64 && \label == xcall
+alternative_if_not ARM64_HAS_HW_XCALL
+	b	.Lskip_hw_xcall\@
+alternative_else_nop_endif
+	kernel_entry 0, 64, xcall
+	mov	x0, sp
+	bl	el0t_64_xcall_handler
+	kernel_exit 0, xcall
+.Lskip_hw_xcall\@:
+	.endif
+#endif
 #ifdef CONFIG_FAST_IRQ
@@ -775,7 +791,11 @@ SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
 	entry_handler	0, t, 64, fiq
 	entry_handler	0, t, 64, error
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	entry_handler	0, t, 64, xcall
+#else
 	entry_handler	0, t, 32, sync
+#endif
 #ifdef CONFIG_ARCH_SUPPORTS_XINT
 	entry_handler	0, t, 64, xint
 #else
@@ -926,13 +946,19 @@
 alternative_else_nop_endif
 	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
 	.endr
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
+#else
+	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
+#endif
+
 #ifdef CONFIG_ARCH_SUPPORTS_XINT
 	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
 #else
 	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
 #endif
-	.rept	3
+	.rept	2
 	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
 	.endr
 	.endm
@@ -983,13 +1009,19 @@ SYM_CODE_END(tramp_exit)
 	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
 	.endr
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
+#else
+	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
+#endif
+
 #ifdef CONFIG_ARCH_SUPPORTS_XINT
 	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
 #else
 	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
 #endif
-	.rept	3
+	.rept	2
 	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
 	.endr
 	.endm
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index b1f109f17e4f..3a6e38f45618 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -110,8 +110,8 @@ WORKAROUND_HISI_HIP08_RU_PREFETCH
 WORKAROUND_HISILICON_1980005
 HAS_XCALL
 HAS_XINT
+HAS_HW_XCALL
 HAS_HW_XINT
-KABI_RESERVE_4
 KABI_RESERVE_5
 KABI_RESERVE_6
 KABI_RESERVE_7
-- 
2.34.1
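For context on how the new cpucap is consumed: once CPU capabilities
are finalized during boot, other arm64 code can gate an xcall-specific
path on ARM64_HAS_HW_XCALL with the standard cpus_have_final_cap()
helper. An illustrative sketch, not part of this patch (the wrapper
name is hypothetical):

	#include <linux/kconfig.h>
	#include <asm/cpufeature.h>

	/* Illustrative wrapper; not part of this patch. */
	static inline bool hw_xcall_available(void)
	{
		return IS_ENABLED(CONFIG_ARCH_SUPPORTS_XCALL) &&
		       cpus_have_final_cap(ARM64_HAS_HW_XCALL);
	}

The entry.S hunks above make the same decision in assembly via
alternative_if_not ARM64_HAS_HW_XCALL, which is patched in at boot
once the capability state is known, so the vector falls back to the
skip path on hardware without xcall support.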