[PATCH OLK-6.6 0/2] arm64: entry: Support hardware xint and xcall

Support hardware xint and xcall.

Jinjie Ruan (2):
  arm64: entry: Support hardware xint
  arm64: entry: Support hardware xcall

 arch/Kconfig                     |  14 ++---
 arch/arm64/Kconfig               |  12 ++++
 arch/arm64/include/asm/sysreg.h  |   7 +++
 arch/arm64/kernel/cpufeature.c   | 100 +++++++++++++++++++++++++++++++
 arch/arm64/kernel/entry-common.c |  75 ++++++++++++++++++++---
 arch/arm64/kernel/entry.S        |  74 +++++++++++++++++++++--
 arch/arm64/tools/cpucaps         |   4 +-
 drivers/irqchip/irq-gic-v3.c     |  36 ++++++++++-
 8 files changed, 298 insertions(+), 24 deletions(-)

-- 
2.34.1

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IBV2E4

--------------------------------

Support hardware xint.

Hardware xint provides a separate vector entry for interrupt handling,
so it can be used to customize the interrupt path and service
interrupts with lower latency.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/arm64/Kconfig               |  7 ++++
 arch/arm64/include/asm/sysreg.h  |  4 ++
 arch/arm64/kernel/cpufeature.c   | 69 +++++++++++++++++++++++++++++++
 arch/arm64/kernel/entry-common.c | 71 ++++++++++++++++++++++++++++----
 arch/arm64/kernel/entry.S        | 40 ++++++++++++++++--
 arch/arm64/tools/cpucaps         |  2 +-
 drivers/irqchip/irq-gic-v3.c     | 36 +++++++++++++++-
 7 files changed, 216 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9da9d58f1c02..57d05cfbd29e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1717,6 +1717,13 @@ config ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG
 config ARCH_SUPPORTS_CRASH_DUMP
 	def_bool y
 
+config ARCH_SUPPORTS_XINT
+	bool "Hardware xint support"
+	default n
+	depends on ARM64_NMI
+	depends on ARM_GIC_V3
+	depends on !COMPAT
+
 config TRANS_TABLE
 	def_bool y
 	depends on HIBERNATION || KEXEC_CORE
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 435634a703c6..b34e8cc4476b 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -269,6 +269,10 @@
 #define SYS_REVIDR_EL1			sys_reg(3, 0, 0, 0, 6)
 
 #define SYS_ACTLR_EL1			sys_reg(3, 0, 1, 0, 1)
+
+#define ACTLR_ELx_XINT_SHIFT	21
+#define ACTLR_ELx_XINT		(BIT(ACTLR_ELx_XINT_SHIFT))
+
 #define SYS_RGSR_EL1			sys_reg(3, 0, 1, 0, 5)
 #define SYS_GCR_EL1			sys_reg(3, 0, 1, 0, 6)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a1736e9044da..4f7cd116c36f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2424,6 +2424,66 @@ static bool has_xint_support(const struct arm64_cpu_capabilities *entry, int __u
 }
 #endif
 
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
+static bool test_has_xint_xcall(void)
+{
+	u64 new, old = read_sysreg(actlr_el1);
+
+	write_sysreg(old | ACTLR_ELx_XINT, actlr_el1);
+
+	isb();
+	new = read_sysreg(actlr_el1);
+	if (new & ACTLR_ELx_XINT) {
+		write_sysreg(old, actlr_el1);
+		return true;
+	}
+
+	return false;
+}
+
+static void enable_xint_xcall(void)
+{
+	u64 actlr_el1, actlr_el2;
+	u64 el;
+
+	el = read_sysreg(CurrentEL);
+	if (el == CurrentEL_EL2) {
+		actlr_el2 = read_sysreg(actlr_el2);
+		actlr_el2 |= ACTLR_ELx_XINT;
+		write_sysreg(actlr_el2, actlr_el2);
+		isb();
+		actlr_el2 = read_sysreg(actlr_el2);
+		pr_info("actlr_el2: %llx, cpu:%d\n", actlr_el2, smp_processor_id());
+	}
+
+	actlr_el1 = read_sysreg(actlr_el1);
+	actlr_el1 |= ACTLR_ELx_XINT;
+	write_sysreg(actlr_el1, actlr_el1);
+	isb();
+	actlr_el1 = read_sysreg(actlr_el1);
+	pr_info("actlr_el1: %llx, cpu:%d\n", actlr_el1, smp_processor_id());
+}
+
+static bool test_has_xint(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	if (!IS_ENABLED(CONFIG_ARM64_NMI))
+		pr_info("CONFIG_ARM64_NMI disabled, using XINTs for guests only\n");
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+	else if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) {
+		pr_info("Pseudo NMI enabled, not using architected XINT\n");
+		return false;
+	}
+#endif
+
+	return test_has_xint_xcall();
+}
+
+static void xint_enable(const struct arm64_cpu_capabilities *__unused)
+{
+	enable_xint_xcall();
+}
+#endif
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.capability = ARM64_ALWAYS_BOOT,
@@ -2971,6 +3031,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_xint_support,
 	},
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
+	{
+		.desc = "Hardware xint Support",
+		.capability = ARM64_HAS_HW_XINT,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = test_has_xint,
+		.cpu_enable = xint_enable,
+	},
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 4602c107c40a..81e5286eb81f 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -13,6 +13,9 @@
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/thread_info.h>
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
+#include <linux/irqdomain.h>
+#endif
 
 #include <asm/cpufeature.h>
 #include <asm/daifflags.h>
@@ -607,7 +610,7 @@ static void noinstr el0_xint(struct pt_regs *regs, u64 nmi_flag,
 }
 
-asmlinkage void noinstr el0t_64_xint_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_64_sw_xint_handler(struct pt_regs *regs)
 {
 	el0_xint(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq);
 }
@@ -966,6 +969,66 @@ asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
 	__el0_error_handler_common(regs);
 }
 
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
+extern bool gic_irqnr_is_special(u32 irqnr);
+extern u64 gic_read_nmiar(void);
+
+extern bool check_xint(unsigned long hwirq);
+extern bool is_spi(unsigned long hwirq);
+
+asmlinkage void noinstr el0t_64_xint_handler(struct pt_regs *regs)
+{
+	u32 irqnr = read_sysreg_s(SYS_ICC_HPPIR1_EL1);
+
+	if (gic_irqnr_is_special(irqnr))
+		return;
+
+	if (check_xint(irqnr)) {
+		struct pt_regs *old_regs;
+		struct irq_domain *domain;
+		struct irqaction *action;
+		struct irq_desc *desc;
+		struct irq_data *data;
+
+		arch_nmi_enter();
+		BUG_ON(in_nmi() == NMI_MASK);
+		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);
+		old_regs = set_irq_regs(regs);
+
+		domain = irq_get_default_host();
+		data = radix_tree_lookup(&domain->revmap_tree, irqnr);
+		desc = irq_data_to_desc(data);
+		action = desc->action;
+
+		gic_read_nmiar();
+		write_gicreg(irqnr, ICC_EOIR1_EL1);
+		isb();
+
+		if (is_spi(irqnr))
+			action->handler(data->irq, action->dev_id);
+		else
+			action->handler(data->irq, raw_cpu_ptr(action->percpu_dev_id));
+		gic_write_dir(irqnr);
+
+		set_irq_regs(old_regs);
+		BUG_ON(!in_nmi());
+		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);
+		arch_nmi_exit();
+	} else {
+		el0t_64_irq_handler(regs);
+	}
+}
+#else
+#ifdef CONFIG_AARCH32_EL0
+asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
+{
+	__el0_irq_handler_common(regs);
+}
+#else /* CONFIG_AARCH32_EL0 */
+UNHANDLED(el0t, 32, irq)
+#endif
+#endif
+
 #ifdef CONFIG_AARCH32_EL0
 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
 {
@@ -1028,11 +1091,6 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
 	}
 }
 
-asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
-{
-	__el0_irq_handler_common(regs);
-}
-
 asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
 {
 	__el0_fiq_handler_common(regs);
@@ -1044,7 +1102,6 @@ asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
 }
 #else /* CONFIG_AARCH32_EL0 */
 UNHANDLED(el0t, 32, sync)
-UNHANDLED(el0t, 32, irq)
 UNHANDLED(el0t, 32, fiq)
 UNHANDLED(el0t, 32, error)
 #endif /* CONFIG_AARCH32_EL0 */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index da3809632f0f..046225fa2f90 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -565,7 +565,11 @@ SYM_CODE_START(vectors)
 	kernel_ventry	0, t, 64, error		// Error 64-bit EL0
 
 	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
+	kernel_ventry	0, t, 64, xint		// XINT 64-bit EL0
+#else
 	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
+#endif
 	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
 	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
 SYM_CODE_END(vectors)
@@ -696,7 +700,7 @@ SYM_CODE_END(__bad_stack)
 	kernel_entry 0, 64
 #endif
 	mov	x0, sp
-	bl	el0t_64_xint_handler
+	bl	el0t_64_sw_xint_handler
 #ifdef CONFIG_SECURITY_FEATURE_BYPASS
 	kernel_exit 0, xint
 #else
@@ -729,6 +733,18 @@ SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
 		check_xint_pre_kernel_entry
 .Lskip_check_xint\@:
 	.endif
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
+	.if \el == 0 && \regsize == 64 && \label == xint
+alternative_if_not ARM64_HAS_HW_XINT
+	b	.Lskip_hw_xint\@
+alternative_else_nop_endif
+	kernel_entry 0, 64, xint
+	mov	x0, sp
+	bl	el0t_64_xint_handler
+	kernel_exit 0, xint
+.Lskip_hw_xint\@:
+	.endif
 #endif
 	kernel_entry \el, \regsize
 	mov	x0, sp
@@ -760,7 +776,11 @@ SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
 	entry_handler	0, t, 64, error
 
 	entry_handler	0, t, 32, sync
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
+	entry_handler	0, t, 64, xint
+#else
 	entry_handler	0, t, 32, irq
+#endif
 	entry_handler	0, t, 32, fiq
 	entry_handler	0, t, 32, error
@@ -905,7 +925,14 @@ alternative_else_nop_endif
 	.rept	4
 	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
 	.endr
-	.rept	4
+
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
+	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
+#else
+	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
+#endif
+
+	.rept	3
 	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
 	.endr
 	.endm
@@ -955,7 +982,14 @@ SYM_CODE_END(tramp_exit)
 	.rept	4
 	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
 	.endr
-	.rept	4
+
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
+	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
+#else
+	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
+#endif
+
+	.rept	3
 	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
 	.endr
 	.endm
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index f2ddced689b5..b1f109f17e4f 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -110,7 +110,7 @@ WORKAROUND_HISI_HIP08_RU_PREFETCH
 WORKAROUND_HISILICON_1980005
 HAS_XCALL
 HAS_XINT
-KABI_RESERVE_3
+HAS_HW_XINT
 KABI_RESERVE_4
 KABI_RESERVE_5
 KABI_RESERVE_6
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 4cc8b95d533f..7b0c6be5e811 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -422,6 +422,35 @@ static void gic_irq_configure_nmi(struct irq_data *d, bool enable)
 	raw_spin_unlock(&irq_controller_lock);
 }
 
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
+#define IPI_NMI_HWIRQ		7
+#define PERF_PMU_HWIRQ		23
+
+bool check_xint(unsigned long hwirq)
+{
+	if (hwirq == IPI_NMI_HWIRQ)
+		return true;
+
+#ifdef CONFIG_ARM_PMU
+	if (hwirq == PERF_PMU_HWIRQ)
+		return true;
+#endif
+
+	return false;
+}
+EXPORT_SYMBOL(check_xint);
+
+bool is_spi(unsigned long hwirq)
+{
+	if (__get_intid_range(hwirq) == SPI_RANGE ||
+	    __get_intid_range(hwirq) == ESPI_RANGE)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(is_spi);
+#endif
+
 static void gic_irq_enable_nmi(struct irq_data *d)
 {
 	gic_irq_configure_nmi(d, true);
@@ -828,10 +857,11 @@ static bool gic_rpr_is_nmi_prio(void)
 	return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
 }
 
-static bool gic_irqnr_is_special(u32 irqnr)
+bool gic_irqnr_is_special(u32 irqnr)
 {
 	return irqnr >= 1020 && irqnr <= 1023;
 }
+EXPORT_SYMBOL(gic_irqnr_is_special);
 
 static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
 {
@@ -958,7 +988,7 @@ static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_ARM64
-static inline u64 gic_read_nmiar(void)
+inline u64 gic_read_nmiar(void)
 {
 	u64 irqstat;
 
@@ -968,6 +998,7 @@ static inline u64 gic_read_nmiar(void)
 
 	return irqstat;
 }
+EXPORT_SYMBOL(gic_read_nmiar);
 
 static asmlinkage void __exception_irq_entry gic_handle_nmi_irq(struct pt_regs *regs)
 {
@@ -2358,6 +2389,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
 		goto out_free;
 	}
 
+	irq_set_default_host(gic_data.domain);
 	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
 
 	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
-- 
2.34.1
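A note for readers on the detection idiom in test_has_xint_xcall() above:
the probe sets the candidate ACTLR_EL1 bit, checks whether it reads back
as set (on a CPU without the feature the bit is RES0 and reads back as
zero), and then restores the original register value. The stand-alone C
sketch below models that write-then-read-back probe with the system
register stubbed out so it compiles on any host; fake_actlr_el1,
hw_implements_xint and the helper names are illustrative stand-ins, not
kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACTLR_ELx_XINT_SHIFT 21
#define ACTLR_ELx_XINT (1ULL << ACTLR_ELx_XINT_SHIFT)

/* Stand-in for the CPU register; the kernel uses read/write_sysreg(). */
static uint64_t fake_actlr_el1;
static bool hw_implements_xint = true;	/* toggle to model other CPUs */

static uint64_t read_actlr_el1(void) { return fake_actlr_el1; }

static void write_actlr_el1(uint64_t v)
{
	/* A CPU without the feature treats the bit as RES0. */
	if (!hw_implements_xint)
		v &= ~ACTLR_ELx_XINT;
	fake_actlr_el1 = v;
}

static bool test_has_bit(uint64_t bit)
{
	uint64_t old = read_actlr_el1();
	bool sticks;

	write_actlr_el1(old | bit);	/* try to set the bit */
	sticks = read_actlr_el1() & bit;	/* did it read back as 1? */
	write_actlr_el1(old);	/* restore the original value */
	return sticks;
}

int main(void)
{
	printf("XINT present: %d\n", test_has_bit(ACTLR_ELx_XINT));
	return 0;
}

The same probe-and-restore pattern works for any optional, write-ignored
control bit, which is why the second patch can reuse it for xcall by
parameterizing the bit.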

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IBV2E4

--------------------------------

Support hardware xcall.

Hardware xcall provides a separate vector entry for EL0 syscall
handling, so it can be used to customize the syscall path and service
system calls with lower latency.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/Kconfig                     | 14 ++++-----
 arch/arm64/Kconfig               |  5 ++++
 arch/arm64/include/asm/sysreg.h  |  3 ++
 arch/arm64/kernel/cpufeature.c   | 49 ++++++++++++++++++++++++------
 arch/arm64/kernel/entry-common.c |  4 +--
 arch/arm64/kernel/entry.S        | 38 +++++++++++++++++++++++--
 arch/arm64/tools/cpucaps         |  2 +-
 7 files changed, 93 insertions(+), 22 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index a4ed5d338dad..dd7b91f19edc 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1574,12 +1574,12 @@ config FAST_IRQ
 	  framework for latency-sensitive interrupts.
 
 config DEBUG_FEATURE_BYPASS
-	bool "Bypass debug feature in fast syscall"
-	depends on FAST_SYSCALL || FAST_IRQ
+	bool "Bypass debug feature in fast syscall/irq and hardware xcall"
+	depends on FAST_SYSCALL || FAST_IRQ || ARCH_SUPPORTS_XCALL
 	depends on !LOCKDEP
 	default y
 	help
-	  This to bypass debug feature in fast syscall.
+	  This to bypass debug feature in fast syscall/irq and hardware xcall.
 	  The svc exception handling process, which includes auxiliary
 	  functions for debug/trace and core functions like KPTI, has been
 	  identified as overly "lengthy".
@@ -1587,12 +1587,12 @@
 	  Disable this config to keep debug feature in fast syscall.
 
 config SECURITY_FEATURE_BYPASS
-	bool "Bypass security feature in fast syscall"
-	depends on FAST_SYSCALL || FAST_IRQ
+	bool "Bypass security feature in fast syscall and hardware xcall"
+	depends on FAST_SYSCALL || FAST_IRQ || ARCH_SUPPORTS_XCALL
 	default y
 	help
-	  This to bypass security feature in fast syscall.
-	  The svc exception handling process, which includes auxiliary
+	  This to bypass security feature in fast syscall/irq and hardware
+	  xcall. The svc exception handling process, which includes auxiliary
 	  functions for debug/trace and core functions like KPTI, has been
 	  identified as overly "lengthy".
 	  In fast syscall we only considers necessary features.
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 57d05cfbd29e..14b818fce7a1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1724,6 +1724,11 @@ config ARCH_SUPPORTS_XINT
 	depends on ARM_GIC_V3
 	depends on !COMPAT
 
+config ARCH_SUPPORTS_XCALL
+	bool "Hardware xcall support"
+	depends on !COMPAT
+	default n
+
 config TRANS_TABLE
 	def_bool y
 	depends on HIBERNATION || KEXEC_CORE
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b34e8cc4476b..f97b2b400266 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -270,6 +270,9 @@
 
 #define SYS_ACTLR_EL1			sys_reg(3, 0, 1, 0, 1)
 
+#define ACTLR_ELx_XCALL_SHIFT	20
+#define ACTLR_ELx_XCALL		(BIT(ACTLR_ELx_XCALL_SHIFT))
+
 #define ACTLR_ELx_XINT_SHIFT	21
 #define ACTLR_ELx_XINT		(BIT(ACTLR_ELx_XINT_SHIFT))
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4f7cd116c36f..e4537acb490f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2424,16 +2424,24 @@ static bool has_xint_support(const struct arm64_cpu_capabilities *entry, int __u
 }
 #endif
 
-#ifdef CONFIG_ARCH_SUPPORTS_XINT
-static bool test_has_xint_xcall(void)
+#if defined(CONFIG_ARCH_SUPPORTS_XINT) || defined(CONFIG_ARCH_SUPPORTS_XCALL)
+static bool test_has_xint_xcall(bool is_xint)
 {
 	u64 new, old = read_sysreg(actlr_el1);
 
-	write_sysreg(old | ACTLR_ELx_XINT, actlr_el1);
+	if (is_xint)
+		write_sysreg(old | ACTLR_ELx_XINT, actlr_el1);
+	else
+		write_sysreg(old | ACTLR_ELx_XCALL, actlr_el1);
 
 	isb();
 	new = read_sysreg(actlr_el1);
-	if (new & ACTLR_ELx_XINT) {
+	if (is_xint && (new & ACTLR_ELx_XINT)) {
+		write_sysreg(old, actlr_el1);
+		return true;
+	}
+
+	if (!is_xint && (new & ACTLR_ELx_XCALL)) {
 		write_sysreg(old, actlr_el1);
 		return true;
 	}
@@ -2441,7 +2449,7 @@ static bool test_has_xint_xcall(void)
 	return false;
 }
 
-static void enable_xint_xcall(void)
+static void enable_xint_xcall(bool is_xint)
 {
 	u64 actlr_el1, actlr_el2;
 	u64 el;
@@ -2449,7 +2457,7 @@ static void enable_xint_xcall(void)
 	el = read_sysreg(CurrentEL);
 	if (el == CurrentEL_EL2) {
 		actlr_el2 = read_sysreg(actlr_el2);
-		actlr_el2 |= ACTLR_ELx_XINT;
+		actlr_el2 |= (is_xint ? ACTLR_ELx_XINT : ACTLR_ELx_XCALL);
 		write_sysreg(actlr_el2, actlr_el2);
 		isb();
 		actlr_el2 = read_sysreg(actlr_el2);
@@ -2457,13 +2465,15 @@ static void enable_xint_xcall(void)
 	}
 
 	actlr_el1 = read_sysreg(actlr_el1);
-	actlr_el1 |= ACTLR_ELx_XINT;
+	actlr_el1 |= (is_xint ? ACTLR_ELx_XINT : ACTLR_ELx_XCALL);
 	write_sysreg(actlr_el1, actlr_el1);
 	isb();
 	actlr_el1 = read_sysreg(actlr_el1);
 	pr_info("actlr_el1: %llx, cpu:%d\n", actlr_el1, smp_processor_id());
 }
+#endif
 
+#ifdef CONFIG_ARCH_SUPPORTS_XINT
 static bool test_has_xint(const struct arm64_cpu_capabilities *entry, int scope)
 {
 	if (!IS_ENABLED(CONFIG_ARM64_NMI))
@@ -2475,12 +2485,24 @@ static bool test_has_xint(const struct arm64_cpu_capabilities *entry, int scope)
 	}
 #endif
 
-	return test_has_xint_xcall();
+	return test_has_xint_xcall(true);
 }
 
 static void xint_enable(const struct arm64_cpu_capabilities *__unused)
 {
-	enable_xint_xcall();
+	enable_xint_xcall(true);
+}
+#endif
+
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+static bool test_has_xcall(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	return test_has_xint_xcall(false);
+}
+
+static void xcall_enable(const struct arm64_cpu_capabilities *__unused)
+{
+	enable_xint_xcall(false);
 }
 #endif
 
@@ -3040,6 +3062,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = test_has_xint,
 		.cpu_enable = xint_enable,
 	},
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	{
+		.desc = "Hardware xcall Support",
+		.capability = ARM64_HAS_HW_XCALL,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = test_has_xcall,
+		.cpu_enable = xcall_enable,
+	},
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 81e5286eb81f..1ca7ca3b853a 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -154,7 +154,7 @@ asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
 	exit_to_user_mode(regs);
 }
 
-#if defined(CONFIG_FAST_SYSCALL) || defined(CONFIG_FAST_IRQ)
+#if defined(CONFIG_FAST_SYSCALL) || defined(CONFIG_FAST_IRQ) || defined(CONFIG_ARCH_SUPPORTS_XCALL)
 /*
  * Copy from exit_to_user_mode_prepare
  */
@@ -818,7 +818,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
 	exit_to_user_mode(regs);
 }
 
-#ifdef CONFIG_FAST_SYSCALL
+#if defined(CONFIG_FAST_SYSCALL) || defined(CONFIG_ARCH_SUPPORTS_XCALL)
 /* Copy from el0_sync */
 static void noinstr el0_xcall(struct pt_regs *regs)
 {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 046225fa2f90..23c1b255e02e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -564,7 +564,11 @@ SYM_CODE_START(vectors)
 	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
 	kernel_ventry	0, t, 64, error		// Error 64-bit EL0
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	kernel_ventry	0, t, 64, xcall		// xcall synchronous 64-bit EL0
+#else
 	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
+#endif
 #ifdef CONFIG_ARCH_SUPPORTS_XINT
 	kernel_ventry	0, t, 64, xint		// XINT 64-bit EL0
 #else
@@ -722,7 +726,19 @@ SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
 		b	.Lret_to_kernel_entry\@
 	alternative_else_nop_endif
 		check_xcall_pre_kernel_entry
-	.Lret_to_kernel_entry\@:
+.Lret_to_kernel_entry\@:
+	.endif
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	.if \el == 0 && \regsize == 64 && \label == xcall
+alternative_if_not ARM64_HAS_HW_XCALL
+	b	.Lskip_hw_xcall\@
+alternative_else_nop_endif
+	kernel_entry 0, 64, xcall
+	mov	x0, sp
+	bl	el0t_64_xcall_handler
+	kernel_exit 0, xcall
+.Lskip_hw_xcall\@:
 	.endif
 #endif
 #ifdef CONFIG_FAST_IRQ
@@ -775,7 +791,11 @@ SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
 	entry_handler	0, t, 64, fiq
 	entry_handler	0, t, 64, error
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	entry_handler	0, t, 64, xcall
+#else
 	entry_handler	0, t, 32, sync
+#endif
 #ifdef CONFIG_ARCH_SUPPORTS_XINT
 	entry_handler	0, t, 64, xint
 #else
@@ -926,13 +946,19 @@ alternative_else_nop_endif
 	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
 	.endr
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
+#else
+	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
+#endif
+
 #ifdef CONFIG_ARCH_SUPPORTS_XINT
 	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
 #else
 	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
 #endif
 
-	.rept	3
+	.rept	2
 	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
 	.endr
 	.endm
@@ -983,13 +1009,19 @@ SYM_CODE_END(tramp_exit)
 	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
 	.endr
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
+#else
+	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
+#endif
+
 #ifdef CONFIG_ARCH_SUPPORTS_XINT
 	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
 #else
 	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
 #endif
 
-	.rept	3
+	.rept	2
 	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
 	.endr
 	.endm
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index b1f109f17e4f..3a6e38f45618 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -110,8 +110,8 @@ WORKAROUND_HISI_HIP08_RU_PREFETCH
 WORKAROUND_HISILICON_1980005
 HAS_XCALL
 HAS_XINT
+HAS_HW_XCALL
 HAS_HW_XINT
-KABI_RESERVE_4
 KABI_RESERVE_5
 KABI_RESERVE_6
 KABI_RESERVE_7
-- 
2.34.1
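To help follow the vector-table edits in both patches: an AArch64 EL1
vector table has 16 slots of 0x80 bytes each, and because the series
depends on !COMPAT, the otherwise-unused 32-bit EL0 "sync" and "irq"
slots can be repurposed as the hardware xcall and xint entries. The C
sketch below only prints that layout as an illustration; the slot names
mirror entry.S, and the program itself is not part of the series.

#include <stdio.h>

static const char *vectors[16] = {
	/* Current EL with SP0 */
	"sync (EL1t)", "irq (EL1t)", "fiq (EL1t)", "error (EL1t)",
	/* Current EL with SPx */
	"sync (EL1h)", "irq (EL1h)", "fiq (EL1h)", "error (EL1h)",
	/* Lower EL, AArch64 */
	"sync (EL0, 64-bit)", "irq (EL0, 64-bit)",
	"fiq (EL0, 64-bit)", "error (EL0, 64-bit)",
	/* Lower EL, AArch32 -- reusable because the series requires !COMPAT */
	"xcall (was: sync 32-bit EL0)",	/* patch 2 */
	"xint  (was: irq 32-bit EL0)",	/* patch 1 */
	"fiq (32-bit EL0)", "error (32-bit EL0)",
};

int main(void)
{
	/* Each vector entry is 128 (0x80) bytes from the VBAR_EL1 base. */
	for (int i = 0; i < 16; i++)
		printf("VBAR_EL1 + 0x%03x: %s\n", i * 0x80, vectors[i]);
	return 0;
}

The ARM64_HAS_HW_XINT/ARM64_HAS_HW_XCALL alternatives in entry.S then
decide at each repurposed slot whether to take the hardware fast path or
fall back to the ordinary entry sequence.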

FeedBack:
The patch(es) which you have sent to kernel@openeuler.org mailing list
have been converted to a pull request successfully!
Pull request link:
https://gitee.com/openeuler/kernel/pulls/15594
Mailing list address:
https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/ROD...