From: Liao Chang <liaochang1@huawei.com>

hulk inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/9116

------------------------------------------

In order to support HiSilicon TF firmware, which delegates SEI to
lower-EL software by jumping directly to the exception vector table,
some changes to the standard arm64 exception handling are required.

Signed-off-by: Liao Chang <liaochang1@huawei.com>
Signed-off-by: Wupeng Ma <mawupeng1@huawei.com>
---
 arch/arm64/Kconfig              |  9 ++++++
 arch/arm64/kernel/entry.S       | 52 +++++++++++++++++++++++++++++++++
 arch/arm64/kernel/xcall/entry.S |  6 ++--
 3 files changed, 65 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 74e4639776de..86fbcc277e56 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2505,6 +2505,15 @@ config ARM64_PSEUDO_NMI
 
 	  If unsure, say N
 
+config ARM64_SYNC_SEI
+	bool "Use ESB to synchronize SEI at the exception boundary (EXPERIMENTAL)"
+	depends on ARM64_RAS_EXTN
+	help
+	  For Firmware-First RAS, use ESB to synchronize SEIs that occur
+	  before exception entry from EL0 and before exception return to EL0.
+
+	  If unsure, say N.
+
 if ARM64_PSEUDO_NMI
 config ARM64_DEBUG_PRIORITY_MASKING
 	bool "Debug interrupt priority masking"
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 039ec8d40899..72135826baad 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,6 +29,29 @@
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
+	.macro sync_sei, label=esb
+#ifdef CONFIG_ARM64_SYNC_SEI
+	.ifnc \label, xcall
+	/* Use ESB to synchronize SEI at exception entry and exit */
+	esb
+	.endif
+	.endm
+
+	.macro sei_restore_sp_el0, tmp1:req, tmp2:req
+	/*
+	 * SP_EL0 must be restored from the per-cpu variable __entry_task,
+	 * since TF firmware clobbers SP_EL0 before the SEI is delegated back.
+	 */
+	mov	\tmp1, #(1 << VA_BITS)
+	mrs	\tmp2, sp_el0
+	cmp	\tmp2, \tmp1
+	b.cs	.Lskip_sp_el0_restore\@
+	ldr_this_cpu \tmp2, __entry_task, \tmp1
+	msr	sp_el0, \tmp2
+.Lskip_sp_el0_restore\@:
+#endif /* CONFIG_ARM64_SYNC_SEI */
+	.endm
+
 	.macro clear_gp_regs
 	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
 	mov	x\n, xzr
@@ -39,6 +62,7 @@
 	.align	7
 .Lventry_start\@:
 	.if	\el == 0
+	sync_sei	\label
 	/*
 	 * This must be the first instruction of the EL0 vector entries. It is
 	 * skipped by the trampoline vectors, to trigger the cleanup.
@@ -75,7 +99,15 @@
 	tbnz	x0, #THREAD_SHIFT, 0f
 	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
 	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp
+#ifdef CONFIG_FAST_SYSCALL
+	.ifc \label, xcall
+	b	el\el\ht\()_\regsize\()_sync
+	.else
+	b	el\el\ht\()_\regsize\()_\label
+	.endif
+#else
 	b	el\el\ht\()_\regsize\()_\label
+#endif
 0:
 
 	/*
@@ -107,7 +139,15 @@
 	sub	sp, sp, x0
 	mrs	x0, tpidrro_el0
 #endif
+#ifdef CONFIG_FAST_SYSCALL
+	.ifc \label, xcall
+	b	el\el\ht\()_\regsize\()_sync
+	.else
+	b	el\el\ht\()_\regsize\()_\label
+	.endif
+#else
 	b	el\el\ht\()_\regsize\()_\label
+#endif
 
 .org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
 	.endm
@@ -715,6 +755,13 @@ SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
 	.endif
 #endif
 	kernel_entry \el, \regsize
+
+#ifdef CONFIG_ARM64_SYNC_SEI
+	.ifc el\el\ht\()_\label, el1h_error
+	sei_restore_sp_el0	x20, x21
+	.endif
+#endif
+
 	mov	x0, sp
 	bl	el\el\ht\()_\regsize\()_\label\()_handler
 	.if \el == 0
@@ -832,6 +879,7 @@ alternative_else_nop_endif
 	.macro tramp_ventry, vector_start, regsize, kpti, bhb
 	.align	7
 1:
+	sync_sei
 	.if	\regsize == 64
 	msr	tpidrro_el0, x30	// Restored in kernel_ventry
 	.endif
@@ -906,6 +954,10 @@ alternative_endif
 	.endif // \bhb == BHB_MITIGATION_FW
 
 	add	x30, x30, #(1b - \vector_start + 4)
+#ifdef CONFIG_ARM64_SYNC_SEI
+	/* Also skip the 'esb' emitted by sync_sei at the vector entry */
+	add	x30, x30, #4
+#endif
 	ret
 .org 1b + 128	// Did we overflow the ventry slot?
 	.endm
diff --git a/arch/arm64/kernel/xcall/entry.S b/arch/arm64/kernel/xcall/entry.S
index d5ed68db1547..460922506e62 100644
--- a/arch/arm64/kernel/xcall/entry.S
+++ b/arch/arm64/kernel/xcall/entry.S
@@ -209,13 +209,13 @@ SYM_CODE_START_LOCAL(el0t_fast_syscall)
 SYM_CODE_END(el0t_fast_syscall)
 
 SYM_CODE_START_LOCAL(el0t_64_sync_ventry)
-	kernel_ventry	0, t, 64, sync
+	kernel_ventry	0, t, 64, xcall
 SYM_CODE_END(el0t_64_sync_ventry)
 
 SYM_CODE_START_LOCAL(el0t_64_sync_ventry_vector)
 	ldp	x20, x21, [sp, #16 * 10]
 	add	sp, sp, #PT_REGS_SIZE
-	kernel_ventry	0, t, 64, sync
+	kernel_ventry	0, t, 64, xcall
 SYM_CODE_END(el0t_64_sync_ventry_vector)
 
 SYM_CODE_START_LOCAL(el0t_64_sync_table)
@@ -240,6 +240,7 @@ SYM_CODE_END(el0t_64_sync_table)
 .macro xcall_ventry
 	.align	7
 .Lventry_start\@:
+	sync_sei
 	/*
 	 * This must be the first instruction of the EL0 vector entries. It is
 	 * skipped by the trampoline vectors, to trigger the cleanup.
@@ -266,6 +267,7 @@ SYM_CODE_END(el0t_64_sync_table)
 .macro sync_ventry
 	.align	7
 .Lventry_start\@:
+	sync_sei
 	/*
 	 * This must be the first instruction of the EL0 vector entries. It is
 	 * skipped by the trampoline vectors, to trigger the cleanup.
-- 
2.43.0
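
P.S. For reviewers less familiar with FEAT_RAS: below is a minimal,
illustrative sketch (not part of this patch) of the architectural
pattern the sync_sei macro builds on. The register choice and the
handle_deferred_serror target are placeholders, and older assemblers
may need "mrs_s x0, SYS_DISR_EL1" instead of the named register form.

	/*
	 * With the RAS extension, executing ESB while SError is masked
	 * (PSTATE.A == 1) does not take a pending SEI asynchronously;
	 * the error is deferred and recorded in DISR_EL1 instead, so it
	 * can be consumed synchronously at the exception boundary.
	 */
	esb				// synchronize/defer any pending SError
	mrs	x0, disr_el1		// read the deferred interrupt status
	tbz	x0, #31, 1f		// DISR_EL1.A == 0: nothing was deferred
	msr	disr_el1, xzr		// consume the deferred status
	b	handle_deferred_serror	// placeholder error handler
1:

This is also why the trampoline return path must account for one extra
instruction: the esb emitted by sync_sei sits before what used to be
the first instruction of each EL0 vector entry.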