From: Srinivas Ramana <sramana@codeaurora.org>
mainline inclusion
from mainline-v5.12-rc1
commit 7f6240858cf3abb75237c9ba63ec70d232573ae8
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7QNYP
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit?id...
----------------------------------------
Defer enabling pointer authentication on the boot core until after it's
required to be enabled by the cpufeature framework. This will help in
controlling the feature dynamically with a boot parameter.
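As a rough sketch of the resulting flow (all names below are taken from the
hunks in this patch; nothing beyond them is assumed): the boot core's early
__ptrauth_keys_init_cpu step in head.S goes away, and the SCTLR_EL1 enable
bits are instead set from C by a new ptrauth_enable() helper, called from
boot_init_stack_canary() next to the existing ptrauth_thread_init_kernel()/
ptrauth_thread_switch_kernel() key setup:

	/* New helper added to <asm/pointer_auth.h> by this patch. */
	static __always_inline void ptrauth_enable(void)
	{
		/* No-op until the cpufeature framework reports that
		 * address authentication is usable. */
		if (!system_supports_address_auth())
			return;
		/* Enable address authentication with the instruction
		 * (IA/IB) and data (DA/DB) keys ... */
		sysreg_clear_set(sctlr_el1, 0, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
						SCTLR_ELx_ENDA | SCTLR_ELx_ENDB));
		/* ... and synchronize the SCTLR_EL1 update. */
		isb();
	}

Since the decision now goes through system_supports_address_auth(), a boot
parameter that masks the feature at the cpufeature level also keeps pointer
authentication off on the boot core, which is what makes the dynamic control
mentioned above possible.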
Signed-off-by: Ajay Patil <pajay@qti.qualcomm.com>
Signed-off-by: Prasad Sodagudi <psodagud@codeaurora.org>
Signed-off-by: Srinivas Ramana <sramana@codeaurora.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/1610152163-16554-2-git-send-email-sramana@codeauro...
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Brazdil <dbrazdil@google.com>
Link: https://lore.kernel.org/r/20210208095732.3267263-22-maz@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: GONG, Ruiqi <gongruiqi1@huawei.com>
---
 arch/arm64/include/asm/pointer_auth.h   | 10 ++++++++++
 arch/arm64/include/asm/stackprotector.h |  1 +
 arch/arm64/kernel/head.S                |  4 ----
 3 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index c6b4f0603024..b112a11e9302 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -76,6 +76,15 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
 	return ptrauth_clear_pac(ptr);
 }

+static __always_inline void ptrauth_enable(void)
+{
+	if (!system_supports_address_auth())
+		return;
+	sysreg_clear_set(sctlr_el1, 0, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+					SCTLR_ELx_ENDA | SCTLR_ELx_ENDB));
+	isb();
+}
+
 #define ptrauth_thread_init_user(tsk)					\
 	ptrauth_keys_init_user(&(tsk)->thread.keys_user)
 #define ptrauth_thread_init_kernel(tsk)					\
@@ -84,6 +93,7 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
 	ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)

 #else /* CONFIG_ARM64_PTR_AUTH */
+#define ptrauth_enable()
 #define ptrauth_prctl_reset_keys(tsk, arg)	(-EINVAL)
 #define ptrauth_strip_insn_pac(lr)	(lr)
 #define ptrauth_thread_init_user(tsk)
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index 7263e0bac680..33f1bb453150 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -41,6 +41,7 @@ static __always_inline void boot_init_stack_canary(void)
 #endif
 	ptrauth_thread_init_kernel(current);
 	ptrauth_thread_switch_kernel(current);
+	ptrauth_enable();
 }

 #endif	/* _ASM_STACKPROTECTOR_H */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b7a9f403ad2d..ad153156f9bd 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -420,10 +420,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	adr_l	x5, init_task
 	msr	sp_el0, x5			// Save thread_info

-#ifdef CONFIG_ARM64_PTR_AUTH
-	__ptrauth_keys_init_cpu	x5, x6, x7, x8
-#endif
-
 	adr_l	x8, vectors			// load VBAR_EL1 with virtual
 	msr	vbar_el1, x8			// vector table address
 	isb