Chen Jun (1):
      config: set default value of CONFIG_ARM64_E0PD

Mark Brown (4):
      arm64: Add initial support for E0PD
      arm64: Factor out checks for KASLR in KPTI code into separate function
      arm64: Don't use KPTI where we have E0PD
      arm64: Use a variable to store non-global mappings decision

Will Deacon (1):
      arm64: kpti: Fix "kpti=off" when KASLR is enabled

 arch/arm64/Kconfig                     | 16 +++++++
 arch/arm64/configs/euleros_defconfig   |  1 +
 arch/arm64/configs/hulk_defconfig      |  1 +
 arch/arm64/configs/openeuler_defconfig |  2 +
 arch/arm64/include/asm/cpucaps.h       |  3 +-
 arch/arm64/include/asm/mmu.h           |  4 +-
 arch/arm64/include/asm/pgtable-hwdef.h |  2 +
 arch/arm64/include/asm/pgtable-prot.h  |  6 ++-
 arch/arm64/include/asm/sysreg.h        |  1 +
 arch/arm64/kernel/cpufeature.c         | 61 ++++++++++++++++++++++++--
 arch/arm64/kernel/setup.c              |  7 +++
 11 files changed, 95 insertions(+), 9 deletions(-)
From: Mark Brown <broonie@kernel.org>

mainline inclusion
from mainline-v5.5-rc3
commit 3e6c69a058deaa50d33c3dac36cde80b4ce590e8
category: feature
bugzilla: NA
CVE: NA
-------------------
Kernel Page Table Isolation (KPTI) is used to mitigate some speculation-based security issues by ensuring that the kernel is not mapped when userspace is running, but this approach is expensive and is incompatible with SPE. E0PD, introduced in the ARMv8.5 extensions, provides an alternative which ensures that accesses from userspace to the kernel's half of the memory map always fault in constant time, preventing timing attacks without requiring constant unmapping and remapping and without preventing legitimate accesses.
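As an aside, the property E0PD enforces is easy to visualise from EL0. The sketch below is not part of the series; the probe address is a hypothetical TTBR1-half address for a 48-bit VA configuration. It simply shows that a userspace read of the kernel half faults; with E0PD1 set, that fault is additionally guaranteed to be taken in constant time, whether or not the address is mapped:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf env;

static void on_segv(int sig)
{
	siglongjmp(env, 1);
}

int main(void)
{
	/* Hypothetical TTBR1 (kernel-half) address for a 48-bit VA
	 * configuration; the exact value is only illustrative. */
	volatile unsigned long *kaddr =
		(volatile unsigned long *)0xffff000000000000UL;

	signal(SIGSEGV, on_segv);
	if (sigsetjmp(env, 1) == 0)
		printf("read %lx (should not happen)\n", *kaddr);
	else
		printf("faulted: EL0 cannot probe the kernel half\n");
	return 0;
}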
Currently this feature will only be enabled if all CPUs in the system support E0PD. If some CPUs do not support the feature at boot time, the feature will not be enabled, and in the unlikely event that a late CPU is the first CPU to lack the feature, we will reject that CPU.
This initial patch does not yet integrate with KPTI; that will be dealt with in followup patches. Ideally we would ensure that by default we don't use KPTI on CPUs where E0PD is present.
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
[will: Fixed typo in Kconfig text]
Signed-off-by: Will Deacon <will@kernel.org>

Conflicts:
	arch/arm64/include/asm/cpucaps.h
	arch/arm64/include/asm/sysreg.h
	arch/arm64/kernel/cpufeature.c

Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/Kconfig                     | 16 ++++++++++++++++
 arch/arm64/include/asm/cpucaps.h       |  3 ++-
 arch/arm64/include/asm/pgtable-hwdef.h |  2 ++
 arch/arm64/include/asm/sysreg.h        |  1 +
 arch/arm64/kernel/cpufeature.c         | 22 ++++++++++++++++++++++
 5 files changed, 43 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index af9f8eb3c827..1ef7862d1b75 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1334,6 +1334,22 @@ config ARCH_RANDOM
 
 endmenu
 
+menu "ARMv8.5 architectural features"
+
+config ARM64_E0PD
+	bool "Enable support for E0PD"
+	default y
+	help
+	  E0PD (part of the ARMv8.5 extensions) allows us to ensure
+	  that EL0 accesses made via TTBR1 always fault in constant time,
+	  providing similar benefits to KASLR as those provided by KPTI, but
+	  with lower overhead and without disrupting legitimate access to
+	  kernel memory such as SPE.
+
+	  This option enables E0PD for TTBR1 where available.
+
+endmenu
+
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 2e09cb7196d4..dd02f2e8d0f5 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -70,7 +70,8 @@
 #define ARM64_HAS_DCPODP		49
 #define ARM64_HAS_TLB_RANGE		50
 #define ARM64_HAS_RNG			51
+#define ARM64_HAS_E0PD			52
 
-#define ARM64_NCAPS			52
+#define ARM64_NCAPS			53
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 6f1c187f1c86..fb563ba035c3 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -298,6 +298,8 @@
 #define TCR_HA			(UL(1) << 39)
 #define TCR_HD			(UL(1) << 40)
 #define TCR_NFD1		(UL(1) << 54)
+#define TCR_E0PD0		(UL(1) << 55)
+#define TCR_E0PD1		(UL(1) << 56)
 
 /*
  * TTBR.
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 791aae473d06..c44465364785 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -670,6 +670,7 @@
 #define ID_AA64MMFR1_VMIDBITS_16	2
 
 /* id_aa64mmfr2 */
+#define ID_AA64MMFR2_E0PD_SHIFT		60
 #define ID_AA64MMFR2_TTL_SHIFT		48
 #define ID_AA64MMFR2_FWB_SHIFT		40
 #define ID_AA64MMFR2_AT_SHIFT		32
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 1b3c9a23eff1..b1c408ef6ea3 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -251,6 +251,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
@@ -1326,6 +1327,14 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "Vulnerable\n");
 }
 
+#ifdef CONFIG_ARM64_E0PD
+static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
+{
+	if (this_cpu_has_cap(ARM64_HAS_E0PD))
+		sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
+}
+#endif /* CONFIG_ARM64_E0PD */
+
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 static bool enable_pseudo_nmi;
@@ -1762,6 +1771,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
 	},
+#endif
+#ifdef CONFIG_ARM64_E0PD
+	{
+		.desc = "E0PD",
+		.capability = ARM64_HAS_E0PD,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.sys_reg = SYS_ID_AA64MMFR2_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR2_E0PD_SHIFT,
+		.matches = has_cpuid_feature,
+		.min_field_value = 1,
+		.cpu_enable = cpu_enable_e0pd,
+	},
 #endif
 	{},
 };
From: Mark Brown <broonie@kernel.org>

mainline inclusion
from mainline-v5.5-rc3
commit c2d92353b28f8542c6b3150900b38c94b1d61480
category: feature
bugzilla: NA
CVE: NA
-------------------
In preparation for integrating E0PD support with KASLR, factor out the checks for the interaction between KASLR and KPTI done in boot context into a new function, kaslr_requires_kpti(), in the process clarifying the distinction between what we do in boot context and what we do at runtime.
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>

Conflicts:
	arch/arm64/include/asm/mmu.h

Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/mmu.h   | 49 ++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/cpufeature.c |  2 +-
 2 files changed, 50 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3e02efc9cdd0..3bfd07b1980c 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -56,6 +56,55 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
 	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }
 
+/*
+ * This check is triggered during the early boot before the cpufeature
+ * is initialised. Checking the status on the local CPU allows the boot
+ * CPU to detect the need for non-global mappings and thus avoiding a
+ * pagetable re-write after all the CPUs are booted. This check will be
+ * anyway run on individual CPUs, allowing us to get the consistent
+ * state once the SMP CPUs are up and thus make the switch to non-global
+ * mappings if required.
+ */
+static inline bool kaslr_requires_kpti(void)
+{
+	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return false;
+
+	return kaslr_offset() > 0;
+}
+
+static inline bool arm64_kernel_use_ng_mappings(void)
+{
+	/* What's a kpti? Use global mappings if we don't know. */
+	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+		return false;
+
+	/*
+	 * Note: this function is called before the CPU capabilities have
+	 * been configured, so our early mappings will be global. If we
+	 * later determine that kpti is required, then
+	 * kpti_install_ng_mappings() will make them non-global.
+	 */
+	if (arm64_kernel_unmapped_at_el0())
+		return true;
+
+	/*
+	 * Once we are far enough into boot for capabilities to be
+	 * ready we will have confirmed if we are using non-global
+	 * mappings so don't need to consider anything else here.
+	 */
+	if (static_branch_likely(&arm64_const_caps_ready))
+		return false;
+
+	/*
+	 * KASLR is enabled so we're going to be enabling kpti on non-broken
+	 * CPUs regardless of their susceptibility to Meltdown. Rather
+	 * than force everybody to go through the G -> nG dance later on,
+	 * just put down non-global mappings from the beginning
+	 */
+	return kaslr_requires_kpti();
+}
+
 typedef void (*bp_hardening_cb_t)(void);
 
 struct bp_hardening_data {
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b1c408ef6ea3..7bbd8809e15a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1062,7 +1062,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	}
 
 	/* Useful for KASLR robustness */
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+	if (kaslr_requires_kpti()) {
 		if (!__kpti_forced) {
 			str = "KASLR";
 			__kpti_forced = 1;
From: Mark Brown <broonie@kernel.org>

mainline inclusion
from mainline-v5.5-rc3
commit 92ac6fd162b42628ebe50cc2f08d6a77759e7911
category: feature
bugzilla: NA
CVE: NA
-------------------
Since E0PD is intended to fulfil the same role as KPTI, we don't need to use KPTI on CPUs where E0PD is available; we can rely on E0PD instead. Change the check that forces KPTI on when KASLR is enabled to check for E0PD first: CPUs with E0PD are not expected to be affected by Meltdown, so they should not need to enable KPTI for other reasons.

Since E0PD is a system capability, we will still enable KPTI if any CPU in the system lacks E0PD; this will rewrite any global mappings that were established on systems where some but not all CPUs support E0PD. We may transiently have a mix of global and non-global mappings while booting, since we use the local CPU to decide whether KPTI will be required before completing CPU enumeration, but any global mappings will be converted to non-global ones when KPTI is applied.
KPTI can still be forced on from the command line if required.
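For reference, forcing this uses the existing arm64 "kpti=" early parameter, which takes a boolean; e.g. adding the following to the kernel command line forces KPTI on even where E0PD is present:

	kpti=1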
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>

Conflicts:
	arch/arm64/include/asm/mmu.h

Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/mmu.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3bfd07b1980c..dc520b4e6b88 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -67,9 +67,21 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
  */
 static inline bool kaslr_requires_kpti(void)
 {
+	u64 ftr;
+
 	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
 		return false;
 
+	/*
+	 * E0PD does a similar job to KPTI so can be used instead
+	 * where available.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+		ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+		if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf)
+			return false;
+	}
+
 	return kaslr_offset() > 0;
 }
From: Mark Brown <broonie@kernel.org>

mainline inclusion
from mainline-v5.5-rc3
commit 09e3c22a86f6889db0e93fb29d9255081a126f64
category: feature
bugzilla: NA
CVE: NA
-------------------
Refactor the code which checks to see if we need to use non-global mappings to use a variable instead of checking with the CPU capabilities each time, doing the initial check for KPTI early in boot, before we start allocating memory, so we still avoid transitioning to non-global mappings in common cases.

Since this variable always matches our decision about non-global mappings, this means we can also combine arm64_kernel_use_ng_mappings() and arm64_kernel_unmapped_at_el0() into a single function: the variable simply stores the result, and the decision code is elsewhere. We could just have the users check the variable directly, but having a function makes it clear that these uses are read-only.

The result is that we simplify the code a bit and reduce the amount of code executed at runtime.
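Condensed from the diff below, the resulting shape is a single boot-time decision feeding a read-only accessor (a sketch, with names as in the patch):

/* arch/arm64/kernel/cpufeature.c: single source of truth,
 * written only during boot. */
bool arm64_use_ng_mappings = false;

/* arch/arm64/include/asm/mmu.h: read-only accessor, making call
 * sites obviously non-mutating. */
static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return arm64_use_ng_mappings;
}

/* arch/arm64/kernel/setup.c: decide once, early in setup_arch(),
 * before the first page tables are built. */
void __init setup_arch(char **cmdline_p)
{
	/* ... */
	arm64_use_ng_mappings = kaslr_requires_kpti();
	/* ... */
}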
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>

Conflicts:
	arch/arm64/kernel/cpufeature.c
	arch/arm64/include/asm/mmu.h

Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/mmu.h   | 67 ++--------------------------------
 arch/arm64/kernel/cpufeature.c | 37 +++++++++++++++++--
 arch/arm64/kernel/setup.c      |  7 ++++
 3 files changed, 45 insertions(+), 66 deletions(-)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index dc520b4e6b88..069ced02c8fa 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -50,71 +50,11 @@ extern int res_mem_count;
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
-static inline bool arm64_kernel_unmapped_at_el0(void)
-{
-	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
-	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
-}
+extern bool arm64_use_ng_mappings;
 
-/*
- * This check is triggered during the early boot before the cpufeature
- * is initialised. Checking the status on the local CPU allows the boot
- * CPU to detect the need for non-global mappings and thus avoiding a
- * pagetable re-write after all the CPUs are booted. This check will be
- * anyway run on individual CPUs, allowing us to get the consistent
- * state once the SMP CPUs are up and thus make the switch to non-global
- * mappings if required.
- */
-static inline bool kaslr_requires_kpti(void)
-{
-	u64 ftr;
-
-	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-		return false;
-
-	/*
-	 * E0PD does a similar job to KPTI so can be used instead
-	 * where available.
-	 */
-	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
-		ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
-		if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf)
-			return false;
-	}
-
-	return kaslr_offset() > 0;
-}
-
-static inline bool arm64_kernel_use_ng_mappings(void)
+static inline bool arm64_kernel_unmapped_at_el0(void)
 {
-	/* What's a kpti? Use global mappings if we don't know. */
-	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
-		return false;
-
-	/*
-	 * Note: this function is called before the CPU capabilities have
-	 * been configured, so our early mappings will be global. If we
-	 * later determine that kpti is required, then
-	 * kpti_install_ng_mappings() will make them non-global.
-	 */
-	if (arm64_kernel_unmapped_at_el0())
-		return true;
-
-	/*
-	 * Once we are far enough into boot for capabilities to be
-	 * ready we will have confirmed if we are using non-global
-	 * mappings so don't need to consider anything else here.
-	 */
-	if (static_branch_likely(&arm64_const_caps_ready))
-		return false;
-
-	/*
-	 * KASLR is enabled so we're going to be enabling kpti on non-broken
-	 * CPUs regardless of their susceptibility to Meltdown. Rather
-	 * than force everybody to go through the G -> nG dance later on,
-	 * just put down non-global mappings from the beginning
-	 */
-	return kaslr_requires_kpti();
+	return arm64_use_ng_mappings;
 }
 
 typedef void (*bp_hardening_cb_t)(void);
@@ -167,6 +107,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       pgprot_t prot, bool page_mappings_only);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
 extern void mark_linear_text_alias_ro(void);
+extern bool kaslr_requires_kpti(void);
 
 #endif	/* !__ASSEMBLY__ */
 #endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 7bbd8809e15a..191cb8beddda 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -56,6 +56,9 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM6
 /* Need also bit for ARM64_CB_PATCH */
 DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
 
+bool arm64_use_ng_mappings = false;
+EXPORT_SYMBOL(arm64_use_ng_mappings);
+
 /*
  * Flag to indicate if we have computed the system wide
  * capabilities based on the boot time active CPUs. This
@@ -1020,6 +1023,35 @@ static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
 	return ctr & BIT(CTR_DIC_SHIFT);
 }
 
+/*
+ * This check is triggered during the early boot before the cpufeature
+ * is initialised. Checking the status on the local CPU allows the boot
+ * CPU to detect the need for non-global mappings and thus avoiding a
+ * pagetable re-write after all the CPUs are booted. This check will be
+ * anyway run on individual CPUs, allowing us to get the consistent
+ * state once the SMP CPUs are up and thus make the switch to non-global
+ * mappings if required.
+ */
+bool kaslr_requires_kpti(void)
+{
+	u64 ftr;
+
+	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return false;
+
+	/*
+	 * E0PD does a similar job to KPTI so can be used instead
+	 * where available.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+		ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+		if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf)
+			return false;
+	}
+
+	return kaslr_offset() > 0;
+}
+
 static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
@@ -1120,10 +1152,9 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
 	kpti_remap_fn *remap_fn;
 
-	static bool kpti_applied = false;
 	int cpu = smp_processor_id();
 
-	if (kpti_applied)
+	if (arm64_use_ng_mappings)
 		return;
 
 	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
@@ -1133,7 +1164,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	cpu_uninstall_idmap();
 
 	if (!cpu)
-		kpti_applied = true;
+		arm64_use_ng_mappings = true;
 
 	return;
 }
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b3569d16af19..aa499eb2a923 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -308,6 +308,13 @@ void __init setup_arch(char **cmdline_p)
 
 	*cmdline_p = boot_command_line;
 
+	/*
+	 * If we know now we are going to need KPTI then use non-global
+	 * mappings from the start, avoiding the cost of rewriting
+	 * everything later.
+	 */
+	arm64_use_ng_mappings = kaslr_requires_kpti();
+
 	early_fixmap_init();
 	early_ioremap_init();
From: Chen Jun <chenjun102@huawei.com>

hulk inclusion
category: feature
bugzilla: NA
CVE: NA
-------------------
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/configs/euleros_defconfig   | 1 +
 arch/arm64/configs/hulk_defconfig      | 1 +
 arch/arm64/configs/openeuler_defconfig | 2 ++
 3 files changed, 4 insertions(+)
diff --git a/arch/arm64/configs/euleros_defconfig b/arch/arm64/configs/euleros_defconfig
index 185b9e5be848..09c604738074 100644
--- a/arch/arm64/configs/euleros_defconfig
+++ b/arch/arm64/configs/euleros_defconfig
@@ -484,6 +484,7 @@ CONFIG_ARM64_CNP=n
 # ARMv8.5 architectural features
 #
 # CONFIG_ARCH_RANDOM is not set
+# CONFIG_ARM64_E0PD is not set
 
 #
 # Boot options
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig
index 85a27dd2bcb2..9952d0b89744 100644
--- a/arch/arm64/configs/hulk_defconfig
+++ b/arch/arm64/configs/hulk_defconfig
@@ -499,6 +499,7 @@ CONFIG_ARM64_TLB_RANGE=y
 # ARMv8.5 architectural features
 #
 CONFIG_ARCH_RANDOM=y
+CONFIG_ARM64_E0PD=y
 
 #
 # Boot options
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index b02f45242358..beb292fa9b47 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -494,6 +494,8 @@ CONFIG_ARM64_TLB_RANGE=y
 # ARMv8.5 architectural features
 #
 CONFIG_ARCH_RANDOM=y
+CONFIG_ARM64_E0PD=y
+
 
 #
 # Boot options
From: Will Deacon <will@kernel.org>

mainline inclusion
from mainline-v5.6-rc1
commit c83557859eaa1286330a4d3d2e1ea0c0988c4604
category: bugfix
bugzilla: NA
CVE: NA
-------------------
Enabling KASLR forces the use of non-global page-table entries for kernel mappings, as this is a decision that we have to make very early on before mapping the kernel proper. When used in conjunction with the "kpti=off" command-line option, it is possible to use non-global kernel mappings but with the kpti trampoline disabled.
Since commit 09e3c22a86f6 ("arm64: Use a variable to store non-global mappings decision"), arm64_kernel_unmapped_at_el0() reflects only the use of non-global mappings and does not take into account whether the kpti trampoline is enabled. This breaks context switching of the TPIDRRO_EL0 register for 64-bit tasks, where the clearing of the register is deferred to the ret-to-user code, and it also breaks the ARM SPE PMU driver, which helpfully recommends passing "kpti=off" on the command line!
Report whether or not KPTI is actually enabled in arm64_kernel_unmapped_at_el0() and check the 'arm64_use_ng_mappings' global variable directly when determining the protection flags for kernel mappings.
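To summarise my reading of the resulting semantics on a system without E0PD (which would otherwise stand in for KPTI):

  Configuration          nG mappings   arm64_kernel_unmapped_at_el0()
  KPTI enabled           yes           true
  KASLR on, kpti=off     yes           false
  no KASLR, safe CPU     no            false

The middle row is the case this patch fixes: page-table protections keep following 'arm64_use_ng_mappings', while consumers that care about the trampoline now see the capability's real state.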
Cc: Mark Brown <broonie@kernel.org>
Reported-by: Hongbo Yao <yaohongbo@huawei.com>
Tested-by: Hongbo Yao <yaohongbo@huawei.com>
Fixes: 09e3c22a86f6 ("arm64: Use a variable to store non-global mappings decision")
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/mmu.h          | 4 +---
 arch/arm64/include/asm/pgtable-prot.h | 6 ++++--
 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 069ced02c8fa..c240609dba34 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -50,11 +50,9 @@ extern int res_mem_count;
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
-extern bool arm64_use_ng_mappings;
-
 static inline bool arm64_kernel_unmapped_at_el0(void)
 {
-	return arm64_use_ng_mappings;
+	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }
 
 typedef void (*bp_hardening_cb_t)(void);
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index af547be1779b..0dab15c2ca86 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -34,11 +34,13 @@
 
 #include <asm/pgtable-types.h>
 
+extern bool arm64_use_ng_mappings;
+
 #define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PTE_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
-#define PMD_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG		(arm64_use_ng_mappings ? PTE_NG : 0)
+#define PMD_MAYBE_NG		(arm64_use_ng_mappings ? PMD_SECT_NG : 0)
 
 #define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)