From: Mark Brown <broonie@kernel.org>
Subject: [PATCH] arm64: Factor out checks for KASLR in KPTI code into separate function
mainline inclusion
from mainline-v5.5-rc3
commit c2d92353b28f8542c6b3150900b38c94b1d61480
category: feature
bugzilla: NA
CVE: NA
-------------------
In preparation for integrating E0PD support with KASLR, factor out the
checks for the interaction between KASLR and KPTI done in boot context
into a new function, kaslr_requires_kpti(), in the process clarifying
the distinction between what we do in boot context and what we do at
runtime.
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Conflicts:
	arch/arm64/include/asm/mmu.h
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/mmu.h   | 49 ++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/cpufeature.c |  2 +-
 2 files changed, 50 insertions(+), 1 deletion(-)
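In brief, the shape of the change (an illustrative summary of the hunks
below, not extra code in the patch): the open-coded boot-time test moves
behind a named predicate, which the early-boot mapping decision in mmu.h
and the cpufeature check in cpufeature.c can now share, and which the
later E0PD integration can extend in a single place.

	/* Before: open-coded in unmap_kernel_at_el0() */
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
		/* force KPTI on for KASLR robustness */
	}

	/* After: the same decision, behind a named helper */
	if (kaslr_requires_kpti()) {
		/* force KPTI on for KASLR robustness */
	}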
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3e02efc9cdd0..3bfd07b1980c 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -56,6 +56,55 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
 	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }
 
+/*
+ * This check is triggered during the early boot before the cpufeature
+ * is initialised. Checking the status on the local CPU allows the boot
+ * CPU to detect the need for non-global mappings and thus avoiding a
+ * pagetable re-write after all the CPUs are booted. This check will be
+ * anyway run on individual CPUs, allowing us to get the consistent
+ * state once the SMP CPUs are up and thus make the switch to non-global
+ * mappings if required.
+ */
+static inline bool kaslr_requires_kpti(void)
+{
+	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return false;
+
+	return kaslr_offset() > 0;
+}
+
+static inline bool arm64_kernel_use_ng_mappings(void)
+{
+	/* What's a kpti? Use global mappings if we don't know. */
+	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+		return false;
+
+	/*
+	 * Note: this function is called before the CPU capabilities have
+	 * been configured, so our early mappings will be global. If we
+	 * later determine that kpti is required, then
+	 * kpti_install_ng_mappings() will make them non-global.
+	 */
+	if (arm64_kernel_unmapped_at_el0())
+		return true;
+
+	/*
+	 * Once we are far enough into boot for capabilities to be
+	 * ready we will have confirmed if we are using non-global
+	 * mappings so don't need to consider anything else here.
+	 */
+	if (static_branch_likely(&arm64_const_caps_ready))
+		return false;
+
+	/*
+	 * KASLR is enabled so we're going to be enabling kpti on non-broken
+	 * CPUs regardless of their susceptibility to Meltdown. Rather
+	 * than force everybody to go through the G -> nG dance later on,
+	 * just put down non-global mappings from the beginning.
+	 */
+	return kaslr_requires_kpti();
+}
+
 typedef void (*bp_hardening_cb_t)(void);
 
 struct bp_hardening_data {

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b1c408ef6ea3..7bbd8809e15a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1062,7 +1062,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	}
 
 	/* Useful for KASLR robustness */
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+	if (kaslr_requires_kpti()) {
 		if (!__kpti_forced) {
 			str = "KASLR";
 			__kpti_forced = 1;
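Looking ahead to the E0PD integration this prepares for: with the check
behind one helper, hardware that closes the same side channel can
short-circuit it in a single place. A rough sketch of that direction
(illustrative only, not part of this patch; CONFIG_ARM64_E0PD and the
ID register field names are assumptions drawn from the follow-up series
and may differ in a given tree):

	static inline bool kaslr_requires_kpti(void)
	{
		if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
			return false;

		/*
		 * Sketch: E0PD hides the kernel half of the address space
		 * from EL0, so KASLR alone need not force KPTI on. Read the
		 * local CPU's ID register since capabilities may not be
		 * finalised yet when this runs in boot context.
		 */
		if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
			u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);

			if (cpuid_feature_extract_unsigned_field(mmfr2,
						ID_AA64MMFR2_E0PD_SHIFT))
				return false;
		}

		return kaslr_offset() > 0;
	}

This mirrors the direction mainline took in the subsequent "arm64: Don't
use KPTI where we have E0PD" change.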