From: Mark Rutland <mark.rutland@arm.com>
mainline inclusion
from v5.8-rc1
commit 62a679cb2825
category: feature
bugzilla: 27615
CVE: NA
-------------------------------------------------
Currently __cpu_setup conditionally initializes the address authentication keys and enables them in SCTLR_EL1, doing so differently for the primary CPU and secondary CPUs, and skipping this work for CPUs returning from an idle state. For the latter case, cpu_do_resume restores the keys and SCTLR_EL1 value after the MMU has been enabled.
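For reference, that flow can be summarised by the following C sketch. It is only a model of the __cpu_setup hunk removed from arch/arm64/mm/proc.S at the end of this patch; the boot_mode enum, register stand-ins and helpers (cpu_has_address_auth(), system_supports_address_auth()) are illustrative, not kernel symbols, and the SCTLR bit positions are those from sysreg.h:

  #include <stdbool.h>
  #include <stdint.h>

  #define SCTLR_ELx_ENIA (1ULL << 31)
  #define SCTLR_ELx_ENIB (1ULL << 30)
  #define SCTLR_ELx_ENDA (1ULL << 27)
  #define SCTLR_ELx_ENDB (1ULL << 13)

  enum boot_mode { BOOT_PRIMARY, BOOT_SECONDARY, RUNTIME };
  struct key { uint64_t lo, hi; };

  /* stand-ins for SCTLR_EL1, APIAKEY{LO,HI}_EL1 and secondary_data */
  static uint64_t sctlr_el1;
  static struct key apia_key;
  static struct { void *task; struct key apia; } secondary_data;

  static bool cpu_has_address_auth(void) { return true; }         /* ID_AA64ISAR1_EL1.APA */
  static bool system_supports_address_auth(void) { return true; } /* ARM64_HAS_ADDRESS_AUTH */

  /* old flow: runs in __cpu_setup, before the MMU is on */
  void old_cpu_setup_ptrauth(enum boot_mode mode)
  {
          if (mode == RUNTIME)
                  return;          /* idle resume: cpu_do_resume restores keys and SCTLR_EL1 */
          if (!cpu_has_address_auth())
                  return;

          apia_key = (struct key){ 0, 0 };      /* reset; proper values installed later */

          if (mode == BOOT_SECONDARY) {
                  if (!system_supports_address_auth())
                          return;
                  if (secondary_data.task)      /* keys passed in via secondary_data */
                          apia_key = secondary_data.apia;
          }

          sctlr_el1 |= SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
                       SCTLR_ELx_ENDA | SCTLR_ELx_ENDB;
  }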
This flow is rather difficult to follow, so instead let's move the primary and secondary CPU initialization into their respective boot paths. By following the example of cpu_do_resume and doing so once the MMU is enabled, we can always initialize the keys from the values in thread_struct, and avoid the machinery necessary to pass the keys in secondary_data or open-coding initialization for the boot CPU.
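A minimal sketch of the new per-CPU path, modelled on the __ptrauth_keys_init_cpu macro added below (again with stand-in names and register variables; only the shape of the flow is the point):

  #include <stdbool.h>
  #include <stdint.h>

  #define SCTLR_ELx_ENIA (1ULL << 31)
  #define SCTLR_ELx_ENIB (1ULL << 30)
  #define SCTLR_ELx_ENDA (1ULL << 27)
  #define SCTLR_ELx_ENDB (1ULL << 13)

  struct key { uint64_t lo, hi; };
  struct task { struct { struct key apia; } keys_kernel; };

  static uint64_t sctlr_el1;       /* stand-in for the SCTLR_EL1 register */
  static struct key apia_key;      /* stand-in for APIAKEY{LO,HI}_EL1 */

  static bool cpu_has_address_auth(void) { return true; }   /* ID_AA64ISAR1_EL1.APA */

  /* new flow: one path for every CPU, run once the MMU is on */
  void ptrauth_keys_init_cpu_model(struct task *tsk)
  {
          if (!cpu_has_address_auth())
                  return;

          /* RMW of SCTLR_EL1: set the four enable bits, preserve the rest */
          sctlr_el1 |= SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
                       SCTLR_ELx_ENDA | SCTLR_ELx_ENDB;

          /* keys always come from thread_struct */
          apia_key = tsk->keys_kernel.apia;

          /* the real macro ends with an ISB to synchronise the new context */
  }

In head.S below this is invoked with x5 = init_task on the primary boot path and x2 = the secondary's idle task on the secondary path.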
This means we perform an additional RMW of SCTLR_EL1, but we already do this in the cpu_do_resume path, and for other features in cpufeature.c, so this isn't a major concern in a bringup path. Note that even while the enable bits are clear, the key registers are accessible.
As this now renders the argument to __cpu_setup redundant, let's also remove that entirely. Future extensions can follow a similar approach to initialize values that differ for primary/secondary CPUs.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Cc: Amit Daniel Kachhap <amit.kachhap@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20200423101606.37601-3-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
Conflicts:
	arch/arm64/kernel/smp.c
[Zheng Zengkai: fix conflicts caused by skipping the following commit.
 5b1cfe3a arm64: smp: Don't enter kernel with NULL stack pointer or task struct]
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/asm_pointer_auth.h | 22 ++++++++++++
 arch/arm64/include/asm/smp.h              | 11 ------
 arch/arm64/kernel/asm-offsets.c           |  3 --
 arch/arm64/kernel/head.S                  | 12 +++++--
 arch/arm64/kernel/sleep.S                 |  1 -
 arch/arm64/kernel/smp.c                   |  8 -----
 arch/arm64/mm/proc.S                      | 44 -----------------------
 7 files changed, 32 insertions(+), 69 deletions(-)
diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
index c85540a911d3c..52dead2a8640d 100644
--- a/arch/arm64/include/asm/asm_pointer_auth.h
+++ b/arch/arm64/include/asm/asm_pointer_auth.h
@@ -60,6 +60,28 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
 alternative_else_nop_endif
 	.endm
 
+	.macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
+	mrs	\tmp1, id_aa64isar1_el1
+	ubfx	\tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
+	cbz	\tmp1, .Lno_addr_auth\@
+	mov_q	\tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+		SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
+	mrs	\tmp2, sctlr_el1
+	orr	\tmp2, \tmp2, \tmp1
+	msr	sctlr_el1, \tmp2
+	__ptrauth_keys_install_kernel_nosync \tsk, \tmp1, \tmp2, \tmp3
+	isb
+.Lno_addr_auth\@:
+	.endm
+
+	.macro ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+	b	.Lno_addr_auth\@
+alternative_else_nop_endif
+	__ptrauth_keys_init_cpu \tsk, \tmp1, \tmp2, \tmp3
+.Lno_addr_auth\@:
+	.endm
+
 #else /* CONFIG_ARM64_PTR_AUTH */
 
 	.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3

diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 0faa0f62f4033..65d29eb316277 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -27,14 +27,6 @@
 /* Fatal system error detected by secondary CPU, crash the system */
 #define CPU_PANIC_KERNEL	(3)
 
-/* Possible options for __cpu_setup */
-/* Option to setup primary cpu */
-#define ARM64_CPU_BOOT_PRIMARY		(1)
-/* Option to setup secondary cpus */
-#define ARM64_CPU_BOOT_SECONDARY	(2)
-/* Option to setup cpus for different cpu run time services */
-#define ARM64_CPU_RUNTIME		(3)
-
 #ifndef __ASSEMBLY__
 
 #include <asm/percpu.h>
@@ -94,9 +86,6 @@ asmlinkage void secondary_start_kernel(void);
 struct secondary_data {
 	void *stack;
 	struct task_struct *task;
-#ifdef CONFIG_ARM64_PTR_AUTH
-	struct ptrauth_keys_kernel ptrauth_key;
-#endif
 	long status;
 };
 
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 35f1b16956a88..a792b4d9b89eb 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -138,9 +138,6 @@ int main(void)
   BLANK();
   DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,		offsetof(struct secondary_data, task));
-#ifdef CONFIG_ARM64_PTR_AUTH
-  DEFINE(CPU_BOOT_PTRAUTH_KEY,	offsetof(struct secondary_data, ptrauth_key));
-#endif
   BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c8a64e5bb9c11..dbee08c9d7d1e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/irqchip/arm-gic-v3.h>
 
+#include <asm/asm_pointer_auth.h>
 #include <asm/assembler.h>
 #include <asm/boot.h>
 #include <asm/ptrace.h>
@@ -126,7 +127,6 @@ ENTRY(stext)
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
-	mov	x0, #ARM64_CPU_BOOT_PRIMARY
 	bl	__cpu_setup			// initialise processor
 	b	__primary_switch
 ENDPROC(stext)
@@ -411,6 +411,10 @@ __primary_switched:
 	adr_l	x5, init_task
 	msr	sp_el0, x5			// Save thread_info
 
+#ifdef CONFIG_ARM64_PTR_AUTH
+	__ptrauth_keys_init_cpu x5, x6, x7, x8
+#endif
+
 	adr_l	x8, vectors			// load VBAR_EL1 with virtual
 	msr	vbar_el1, x8			// vector table address
 	isb
@@ -705,7 +709,6 @@ secondary_startup:
 	 * Common entry point for secondary CPUs.
 	 */
 	bl	__cpu_secondary_check52bitva
-	mov	x0, #ARM64_CPU_BOOT_SECONDARY
 	bl	__cpu_setup			// initialise processor
 	bl	__enable_mmu
 	ldr	x8, =__secondary_switched
@@ -724,6 +727,11 @@ __secondary_switched:
 	msr	sp_el0, x2
 	mov	x29, #0
 	mov	x30, #0
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+	ptrauth_keys_init_cpu x2, x3, x4, x5
+#endif
+
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
 
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 8eee57d97281a..f7193aef2cc83 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -100,7 +100,6 @@ ENDPROC(__cpu_suspend_enter)
 	.pushsection ".idmap.text", "awx"
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
-	mov	x0, #ARM64_CPU_RUNTIME
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	bl	__enable_mmu
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ce0e7055dbf7e..5235b9aa05241 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -125,10 +125,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	 */
 	secondary_data.task = idle;
 	secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
-#if defined(CONFIG_ARM64_PTR_AUTH)
-	secondary_data.ptrauth_key.apia.lo = idle->thread.keys_kernel.apia.lo;
-	secondary_data.ptrauth_key.apia.hi = idle->thread.keys_kernel.apia.hi;
-#endif
 	update_cpu_boot_status(CPU_MMU_OFF);
 	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
 
@@ -159,10 +155,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	secondary_data.task = NULL;
 	secondary_data.stack = NULL;
-#if defined(CONFIG_ARM64_PTR_AUTH)
-	secondary_data.ptrauth_key.apia.lo = 0;
-	secondary_data.ptrauth_key.apia.hi = 0;
-#endif
 	status = READ_ONCE(secondary_data.status);
 	if (ret && status) {
 
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index d6e0886fa1e94..9bb8df527ccb0 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -401,8 +401,6 @@ ENDPROC(idmap_kpti_install_ng_mappings)
  *
  *	Initialise the processor for turning the MMU on.
  *
- * Input:
- *	x0 with a flag ARM64_CPU_BOOT_PRIMARY/ARM64_CPU_BOOT_SECONDARY/ARM64_CPU_RUNTIME.
  * Output:
  *	Return in x0 the value of the SCTLR_EL1 register.
  */
@@ -463,51 +461,9 @@ ENTRY(__cpu_setup)
 1:
 #endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
-	mov	x1, x0
 	/*
 	 * Prepare SCTLR
 	 */
 	mov_q	x0, SCTLR_EL1_SET
-
-#ifdef CONFIG_ARM64_PTR_AUTH
-	/* No ptrauth setup for run time cpus */
-	cmp	x1, #ARM64_CPU_RUNTIME
-	b.eq	3f
-
-	/* Check if the CPU supports ptrauth */
-	mrs	x2, id_aa64isar1_el1
-	ubfx	x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8
-	cbz	x2, 3f
-
-	/*
-	 * The primary cpu keys are reset here and can be
-	 * re-initialised with some proper values later.
-	 */
-	msr_s	SYS_APIAKEYLO_EL1, xzr
-	msr_s	SYS_APIAKEYHI_EL1, xzr
-
-	/* Just enable ptrauth for primary cpu */
-	cmp	x1, #ARM64_CPU_BOOT_PRIMARY
-	b.eq	2f
-
-	/* if !system_supports_address_auth() then skip enable */
-alternative_if_not ARM64_HAS_ADDRESS_AUTH
-	b	3f
-alternative_else_nop_endif
-
-	/* Install ptrauth key for secondary cpus */
-	adr_l	x2, secondary_data
-	ldr	x3, [x2, #CPU_BOOT_TASK]	// get secondary_data.task
-	cbz	x3, 2f				// check for slow booting cpus
-	ldp	x3, x4, [x2, #CPU_BOOT_PTRAUTH_KEY]
-	msr_s	SYS_APIAKEYLO_EL1, x3
-	msr_s	SYS_APIAKEYHI_EL1, x4
-
-2:	/* Enable ptrauth instructions */
-	ldr	x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
-		    SCTLR_ELx_ENDA | SCTLR_ELx_ENDB
-	orr	x0, x0, x2
-3:
-#endif
 	ret					// return to head.S
 ENDPROC(__cpu_setup)