From: Liao Chang <liaochang1@huawei.com> hulk inclusion category: feature bugzilla: https://atomgit.com/openeuler/kernel/issues/8918 ------------------------------------------ In order to support hisilicon TF firmware which delegates SEI to lower exception software by jumping to VBAR_EL2 + el1h_64_error offset directly, it needs some changes to the standard Arm64 exception handling: - Place ESB at the exception vector entry from EL0 and before ERET returns to EL0. NOTICE: The exception vectors for traps from EL0 have four different versions depending on kernel config. el0t_64_sync traps to the code generated by macro sync_ventry when CONFIG_FAST_SYSCALL is enabled. el0t_32_sync traps to the code generated by macro xcall_ventry when CONFIG_ACTLR_XCALL_XINT is enabled. All exceptions from EL0 trap to the code generated by macro tramp_ventry when KPTI or spectre mitigation is enabled. Otherwise, it traps to the code generated by macro kernel_ventry by default. - Since the hisilicon TF firmware clobbers the SP_EL0 to delegate SError to lower exception level, it needs to restore the SP_EL0 from this_cpu per-cpu variable for el1h_64_error vector. Signed-off-by: Liao Chang <liaochang1@huawei.com> Signed-off-by: Wupeng Ma <mawupeng1@huawei.com> --- arch/arm64/Kconfig | 9 +++++++++ arch/arm64/kernel/entry.S | 35 +++++++++++++++++++++++++++++++++ arch/arm64/kernel/xcall/entry.S | 2 ++ 3 files changed, 46 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 74e4639776de..86fbcc277e56 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2505,6 +2505,15 @@ config ARM64_PSEUDO_NMI If unsure, say N +config ARM64_SYNC_SEI + bool "Use ESB to Synchronize SEI At Exception Boundary (EXPERIMENTAL)" + depends on ARM64_RAS_EXTN + help + For Firmware-First, use the ESB to synchronize SEI that occurs before + exception entry from EL0 and exit to EL0.
+ +	  If unsure, say N + + if ARM64_PSEUDO_NMI config ARM64_DEBUG_PRIORITY_MASKING bool "Debug interrupt priority masking" diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 039ec8d40899..f9f358f41682 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -29,6 +29,27 @@ #include <asm/asm-uaccess.h> #include <asm/unistd.h> + .macro sync_sei +#ifdef CONFIG_ARM64_SYNC_SEI + /* Use ESB to synchronize SEI at the entry and exit of exception */ + esb + .endm + + .macro sei_restore_sp_el0, tmp1:req, tmp2:req + /* + * It must restore SP_EL0 from per-cpu variable __entry_task, since TF + * firmware clobbers the SP_EL0 before SEI is delegated back. + */ + mov \tmp1, (1UL << VA_BITS) + mrs \tmp2, sp_el0 + cmp \tmp2, \tmp1 + b.cs .Lskip_sp_el0_restore + ldr_this_cpu \tmp2, __entry_task, \tmp1 + msr sp_el0, \tmp2 +.Lskip_sp_el0_restore: +#endif /* CONFIG_ARM64_SYNC_SEI */ + .endm + .macro clear_gp_regs .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 mov x\n, xzr @@ -39,6 +60,7 @@ .align 7 .Lventry_start\@: .if \el == 0 + sync_sei /* * This must be the first instruction of the EL0 vector entries. It is * skipped by the trampoline vectors, to trigger the cleanup.
@@ -482,6 +504,7 @@ alternative_else_nop_endif #endif ldr lr, [sp, #S_LR] + sync_sei add sp, sp, #PT_REGS_SIZE // restore sp /* This must be after the last explicit memory access */ @@ -715,6 +738,13 @@ SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label) .endif #endif kernel_entry \el, \regsize + +#ifdef CONFIG_ARM64_SYNC_SEI + .if \el == 1 && \ht == h && \label == error + sei_restore_sp_el0 x20, x21 + .endif +#endif + mov x0, sp bl el\el\ht\()_\regsize\()_\label\()_handler .if \el == 0 @@ -832,6 +862,7 @@ alternative_else_nop_endif .macro tramp_ventry, vector_start, regsize, kpti, bhb .align 7 1: + sync_sei .if \regsize == 64 msr tpidrro_el0, x30 // Restored in kernel_ventry .endif @@ -906,6 +937,10 @@ alternative_endif .endif // \bhb == BHB_MITIGATION_FW add x30, x30, #(1b - \vector_start + 4) +#ifdef CONFIG_ARM64_SYNC_SEI + /* Skip the 'ESB' and 'B' at default vector entry */ + add x30, x30, #4 +#endif ret .org 1b + 128 // Did we overflow the ventry slot? .endm diff --git a/arch/arm64/kernel/xcall/entry.S b/arch/arm64/kernel/xcall/entry.S index d5ed68db1547..283a1191abab 100644 --- a/arch/arm64/kernel/xcall/entry.S +++ b/arch/arm64/kernel/xcall/entry.S @@ -240,6 +240,7 @@ SYM_CODE_END(el0t_64_sync_table) .macro xcall_ventry .align 7 .Lventry_start\@: + sync_sei /* * This must be the first instruction of the EL0 vector entries. It is * skipped by the trampoline vectors, to trigger the cleanup. @@ -266,6 +267,7 @@ SYM_CODE_END(el0t_64_sync_table) .macro sync_ventry .align 7 .Lventry_start\@: + sync_sei /* * This must be the first instruction of the EL0 vector entries. It is * skipped by the trampoline vectors, to trigger the cleanup. -- 2.43.0