Guenter Roeck (1):
      arm64: kaslr: Use standard early random function

Linus Torvalds (1):
      random: random.h should include archrandom.h, not the other way around

Mark Brown (3):
      arm64: kaslr: Announce KASLR status on boot
      arm64: kaslr: Check command line before looking for a seed
      arm64: Use v8.5-RNG entropy for KASLR seed

Mark Rutland (2):
      arm64: add credited/trusted RNG support
      random: add arch_get_random_*long_early()

Richard Henderson (1):
      arm64: Implement archrandom.h for ARMv8.5-RNG

Robin Murphy (1):
      arm64: Fix CONFIG_ARCH_RANDOM=n build

Yang Yingliang (1):
      config: set default value of CONFIG_ARCH_RANDOM
 Documentation/arm64/cpu-feature-registers.txt |  2 +
 Documentation/arm64/elf_hwcaps.txt            |  5 ++
 arch/arm64/Kconfig                            | 12 +++
 arch/arm64/configs/euleros_defconfig          |  5 ++
 arch/arm64/configs/hulk_defconfig             |  5 ++
 arch/arm64/configs/openeuler_defconfig        |  5 ++
 arch/arm64/include/asm/archrandom.h           | 88 +++++++++++++++++++
 arch/arm64/include/asm/cpucaps.h              |  3 +-
 arch/arm64/include/asm/hwcap.h                |  1 +
 arch/arm64/include/asm/sysreg.h               |  4 +
 arch/arm64/include/uapi/asm/hwcap.h           |  1 +
 arch/arm64/kernel/cpufeature.c                | 14 +++
 arch/arm64/kernel/cpuinfo.c                   |  1 +
 arch/arm64/kernel/kaslr.c                     | 54 +++++++++++-
 include/linux/random.h                        | 22 +++++
 15 files changed, 217 insertions(+), 5 deletions(-)
 create mode 100644 arch/arm64/include/asm/archrandom.h
From: Mark Brown <broonie@kernel.org>

mainline inclusion
from mainline-v5.4-rc3
commit 294a9ddde6cdbf931a28b8c8c928d3f799b61cb5
category: feature
bugzilla: NA
CVE: NA
--------------------------------
Currently the KASLR code is silent at boot unless it forces on KPTI, in which case a message is printed about KPTI rather than KASLR. This can lead to users incorrectly believing their system has the feature enabled when it in fact does not, and if they do notice, the lack of any diagnostics makes the issue harder to understand. Add an initcall which prints a message showing the status of KASLR during boot to make the status clear.
This is particularly useful in cases where we don't have a seed. It seems to be a relatively common error for system integrators and administrators to enable KASLR in their configuration but not provide the seed at runtime, often due to seed provisioning breaking at some later point after it is initially enabled and verified.
Signed-off-by: Mark Brown <broonie@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/kernel/kaslr.c | 41 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 38 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 06941c1fe418..81a8cf3b1598 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -22,6 +22,14 @@
 #include <asm/pgtable.h>
 #include <asm/sections.h>
 
+enum kaslr_status {
+	KASLR_ENABLED,
+	KASLR_DISABLED_CMDLINE,
+	KASLR_DISABLED_NO_SEED,
+	KASLR_DISABLED_FDT_REMAP,
+};
+
+enum kaslr_status __ro_after_init kaslr_status;
 u64 __ro_after_init module_alloc_base;
 u16 __initdata memstart_offset_seed;
 
@@ -97,15 +105,19 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 */
 	early_fixmap_init();
 	fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
-	if (!fdt)
+	if (!fdt) {
+		kaslr_status = KASLR_DISABLED_FDT_REMAP;
 		return 0;
+	}
 
 	/*
 	 * Retrieve (and wipe) the seed from the FDT
 	 */
 	seed = get_kaslr_seed(fdt);
-	if (!seed)
+	if (!seed) {
+		kaslr_status = KASLR_DISABLED_NO_SEED;
 		return 0;
+	}
 
 	/*
 	 * Check if 'nokaslr' appears on the command line, and
@@ -113,8 +125,10 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 */
 	cmdline = kaslr_get_cmdline(fdt);
 	str = strstr(cmdline, "nokaslr");
-	if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+	if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) {
+		kaslr_status = KASLR_DISABLED_CMDLINE;
 		return 0;
+	}
 
 	/*
 	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
@@ -176,3 +190,24 @@ u64 __init kaslr_early_init(u64 dt_phys)
 
 	return offset;
 }
+
+static int __init kaslr_init(void)
+{
+	switch (kaslr_status) {
+	case KASLR_ENABLED:
+		pr_info("KASLR enabled\n");
+		break;
+	case KASLR_DISABLED_CMDLINE:
+		pr_info("KASLR disabled on command line\n");
+		break;
+	case KASLR_DISABLED_NO_SEED:
+		pr_warn("KASLR disabled due to lack of seed\n");
+		break;
+	case KASLR_DISABLED_FDT_REMAP:
+		pr_warn("KASLR disabled due to FDT remapping failure\n");
+		break;
+	}
+
+	return 0;
+}
+core_initcall(kaslr_init)
From: Mark Brown <broonie@kernel.org>

mainline inclusion
from mainline-v5.4-rc3
commit 2203e1adb936a92ab2fd8f705e888af322462736
category: feature
bugzilla: NA
CVE: NA
--------------------------------
Now that we print diagnostics at boot the reason why we do not initialise KASLR matters. Currently we check for a seed before we check whether the user has explicitly disabled KASLR on the command line, which results in misleading diagnostics, so reverse the order of those checks. We still parse (and wipe) the seed from the DT early so that the seed is still masked even when the user has both provided a seed and disabled KASLR on the command line.
Signed-off-by: Mark Brown <broonie@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/kernel/kaslr.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 81a8cf3b1598..415ae2e512aa 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -29,7 +29,7 @@ enum kaslr_status {
 	KASLR_DISABLED_FDT_REMAP,
 };
 
-enum kaslr_status __ro_after_init kaslr_status;
+static enum kaslr_status __initdata kaslr_status;
 u64 __ro_after_init module_alloc_base;
 u16 __initdata memstart_offset_seed;
 
@@ -114,10 +114,6 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 * Retrieve (and wipe) the seed from the FDT
 	 */
 	seed = get_kaslr_seed(fdt);
-	if (!seed) {
-		kaslr_status = KASLR_DISABLED_NO_SEED;
-		return 0;
-	}
 
 	/*
 	 * Check if 'nokaslr' appears on the command line, and
@@ -130,6 +126,11 @@ u64 __init kaslr_early_init(u64 dt_phys)
 		return 0;
 	}
 
+	if (!seed) {
+		kaslr_status = KASLR_DISABLED_NO_SEED;
+		return 0;
+	}
+
 	/*
 	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
 	 * kernel image offset from the seed. Let's place the kernel in the
From: Richard Henderson <richard.henderson@linaro.org>

mainline inclusion
from mainline-v5.5-rc3
commit 1a50ec0b3b2e9a83f1b1245ea37a853aac2f741c
category: feature
bugzilla: NA
CVE: NA
-------------------
Expose the ID_AA64ISAR0.RNDR field to userspace, as the RNG system registers are always available at EL0.
Implement arch_get_random_seed_long using RNDR. Given that the TRNG is likely to be a shared resource between cores and VMs, do not explicitly force re-seeding with RNDRRS. In order to avoid code complexity and potential issues with heterogeneous systems, only provide values after cpufeature has finalized the system capabilities.
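Since the hwcap is the user-visible contract here, a quick way to exercise it from userspace is to read AT_HWCAP2. The small program below is an illustrative sketch only, not part of the patch; it defines HWCAP2_RNG locally (matching the value this patch adds to the uapi header) in case the installed headers predate the feature.

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_RNG
#define HWCAP2_RNG	(1 << 16)	/* same value as the uapi addition below */
#endif

int main(void)
{
	/* AT_HWCAP2 carries the kernel's second hwcap word on arm64. */
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	if (hwcap2 & HWCAP2_RNG)
		printf("FEAT_RNG advertised: RNDR/RNDRRS usable at EL0\n");
	else
		printf("FEAT_RNG not advertised by the kernel\n");

	return 0;
}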
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
[Modified to only function after cpufeature has finalized the system
capabilities and move all the code into the header -- broonie]
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
[will: Advertise HWCAP via /proc/cpuinfo]
Signed-off-by: Will Deacon <will@kernel.org>

Conflicts:
	Documentation/arm64/cpu-feature-registers.rst
	Documentation/arm64/elf_hwcaps.rst
	arch/arm64/include/asm/cpucaps.h
	arch/arm64/include/asm/hwcap.h
	arch/arm64/include/asm/sysreg.h
	arch/arm64/include/uapi/asm/hwcap.h
	arch/arm64/kernel/cpufeature.c
	arch/arm64/kernel/cpuinfo.c

Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 Documentation/arm64/cpu-feature-registers.txt |  2 +
 Documentation/arm64/elf_hwcaps.txt            |  5 ++
 arch/arm64/Kconfig                            | 12 ++++
 arch/arm64/include/asm/archrandom.h           | 67 +++++++++++++++++++
 arch/arm64/include/asm/cpucaps.h              |  3 +-
 arch/arm64/include/asm/hwcap.h                |  1 +
 arch/arm64/include/asm/sysreg.h               |  4 ++
 arch/arm64/include/uapi/asm/hwcap.h           |  1 +
 arch/arm64/kernel/cpufeature.c                | 14 ++++
 arch/arm64/kernel/cpuinfo.c                   |  1 +
 10 files changed, 109 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/include/asm/archrandom.h
diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt
index e7deb1771157..cc2a6f84a058 100644
--- a/Documentation/arm64/cpu-feature-registers.txt
+++ b/Documentation/arm64/cpu-feature-registers.txt
@@ -110,6 +110,8 @@ infrastructure:
      x--------------------------------------------------x
      | Name                         |  bits   | visible |
      |--------------------------------------------------|
+     | RNDR                         | [63-60] |    y    |
+     |--------------------------------------------------|
      | TS                           | [55-52] |    y    |
      |--------------------------------------------------|
      | FHM                          | [51-48] |    y    |
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
index 2bb7c1e827d8..aef3cd29cf74 100644
--- a/Documentation/arm64/elf_hwcaps.txt
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -259,6 +259,11 @@ HWCAP2_DGH
 
     Functionality implied by ID_AA64ISAR1_EL1.DGH == 0b0001.
 
+HWCAP2_RNG
+
+    Functionality implied by ID_AA64ISAR0_EL1.RNDR == 0b0001.
+
+
 4. Unused AT_HWCAP bits
 -----------------------
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0c36a0b040e8..e2a9aa12806c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1322,6 +1322,18 @@ config ARM64_TLB_RANGE
 
 endmenu
 
+menu "ARMv8.5 architectural features"
+
+config ARCH_RANDOM
+	bool "Enable support for random number generation"
+	default y
+	help
+	  Random number generation (part of the ARMv8.5 Extensions)
+	  provides a high bandwidth, cryptographically secure
+	  hardware random number generator.
+
+endmenu
+
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
new file mode 100644
index 000000000000..5ea5a1ce5a5f
--- /dev/null
+++ b/arch/arm64/include/asm/archrandom.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <linux/random.h>
+#include <asm/cpufeature.h>
+
+static inline bool __arm64_rndr(unsigned long *v)
+{
+	bool ok;
+
+	/*
+	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
+	 * and set PSTATE.NZCV to 0b0100 otherwise.
+	 */
+	asm volatile(
+		__mrs_s("%0", SYS_RNDR_EL0) "\n"
+	"	cset %w1, ne\n"
+	: "=r" (*v), "=r" (ok)
+	:
+	: "cc");
+
+	return ok;
+}
+
+static inline bool __must_check arch_get_random_long(unsigned long *v)
+{
+	return false;
+}
+
+static inline bool __must_check arch_get_random_int(unsigned int *v)
+{
+	return false;
+}
+
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+{
+	/*
+	 * Only support the generic interface after we have detected
+	 * the system wide capability, avoiding complexity with the
+	 * cpufeature code and with potential scheduling between CPUs
+	 * with and without the feature.
+	 */
+	if (!cpus_have_const_cap(ARM64_HAS_RNG))
+		return false;
+
+	return __arm64_rndr(v);
+}
+
+
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+{
+	unsigned long val;
+	bool ok = arch_get_random_seed_long(&val);
+
+	*v = val;
+	return ok;
+}
+
+#else
+
+static inline bool __arm64_rndr(unsigned long *v) { return false; }
+
+#endif /* CONFIG_ARCH_RANDOM */
+#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index d805e2369517..2e09cb7196d4 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -69,7 +69,8 @@
 #define ARM64_HAS_ARMv8_4_TTL 48
 #define ARM64_HAS_DCPODP 49
 #define ARM64_HAS_TLB_RANGE 50
+#define ARM64_HAS_RNG 51
 
-#define ARM64_NCAPS 51
+#define ARM64_NCAPS 52
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 5b1b28b9cb4c..57d83fbd72dc 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -104,6 +104,7 @@
 #define KERNEL_HWCAP_I8MM __khwcap2_feature(I8MM)
 #define KERNEL_HWCAP_DGH __khwcap2_feature(DGH)
 #define KERNEL_HWCAP_BF16 __khwcap2_feature(BF16)
+#define KERNEL_HWCAP_RNG __khwcap2_feature(RNG)
 
 /*
  * This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 4fb64c78452d..791aae473d06 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -364,6 +364,9 @@
 #define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
 #define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
 
+#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
+#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
+
 #define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
 #define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
 #define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
@@ -532,6 +535,7 @@
 #endif
 
 /* id_aa64isar0 */
+#define ID_AA64ISAR0_RNDR_SHIFT 60
 #define ID_AA64ISAR0_TLB_SHIFT 56
 #define ID_AA64ISAR0_TS_SHIFT 52
 #define ID_AA64ISAR0_FHM_SHIFT 48
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index e6dad5924703..7752d93bb50f 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -72,5 +72,6 @@
 #define HWCAP2_I8MM (1 << 13)
 #define HWCAP2_BF16 (1 << 14)
 #define HWCAP2_DGH (1 << 15)
+#define HWCAP2_RNG (1 << 16)
 
 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 081d52be6dd1..1b3c9a23eff1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -130,6 +130,7 @@ static bool __system_matches_cap(unsigned int n);
  * sync with the documentation of the CPU feature register ABI.
  */
 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
@@ -1750,6 +1751,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_cnp,
 	},
 #endif /* CONFIG_ARM64_CNP */
+#ifdef CONFIG_ARCH_RANDOM
+	{
+		.desc = "Random Number Generator",
+		.capability = ARM64_HAS_RNG,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64ISAR0_EL1,
+		.field_pos = ID_AA64ISAR0_RNDR_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+	},
+#endif
 	{},
 };
 
@@ -1827,6 +1840,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 39122d40d3d8..b690db801a55 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -101,6 +101,7 @@ static const char *const hwcap_str[] = {
 	"i8mm",
 	"bf16",
 	"dgh",
+	"rng",
 	NULL
 };
From: Mark Brown <broonie@kernel.org>

mainline inclusion
from mainline-v5.5-rc3
commit 2e8e1ea88cbcb19a77b7acb67f6ffe39cc15740c
category: feature
bugzilla: NA
CVE: NA
-------------------
When seeding KASLR on a system with architecture-level random number generation, make use of that entropy by mixing it in with the seed passed by the bootloader. Since this runs very early in init, before feature detection is complete, the check is open coded rather than using archrandom.h.
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/archrandom.h |  8 ++++++++
 arch/arm64/kernel/kaslr.c           | 11 +++++++++++
 2 files changed, 19 insertions(+)

diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 5ea5a1ce5a5f..3fe02da70004 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -59,9 +59,17 @@ static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 	return ok;
 }
 
+static inline bool __init __early_cpu_has_rndr(void)
+{
+	/* Open code as we run prior to the first call to cpufeature. */
+	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
+	return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
+}
+
 #else
 
 static inline bool __arm64_rndr(unsigned long *v) { return false; }
+static inline bool __init __early_cpu_has_rndr(void) { return false; }
 
 #endif /* CONFIG_ARCH_RANDOM */
 #endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 415ae2e512aa..49146e5ccffe 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -126,6 +126,17 @@ u64 __init kaslr_early_init(u64 dt_phys)
 		return 0;
 	}
 
+	/*
+	 * Mix in any entropy obtainable architecturally, open coded
+	 * since this runs extremely early.
+	 */
+	if (__early_cpu_has_rndr()) {
+		unsigned long raw;
+
+		if (__arm64_rndr(&raw))
+			seed ^= raw;
+	}
+
 	if (!seed) {
 		kaslr_status = KASLR_DISABLED_NO_SEED;
 		return 0;
From: Robin Murphy <robin.murphy@arm.com>

mainline inclusion
from mainline-v5.6-rc1
commit 74a44bed8d93782affb707a33469bda7052b4207
category: bugfix
bugzilla: NA
CVE: NA
-------------------
The entire asm/archrandom.h header is generically included via linux/random.h only when CONFIG_ARCH_RANDOM is already set, so the stub definitions of __arm64_rndr() and __early_cpu_has_rndr() are only visible to KASLR if it explicitly includes the arch-internal header.
Acked-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/kernel/kaslr.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 49146e5ccffe..291f53f29a4d 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/types.h>
 
+#include <asm/archrandom.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kernel-pgtable.h>
From: Mark Rutland <mark.rutland@arm.com>

mainline inclusion
from mainline-v5.6-rc3
commit ead5084cdf5af51445d219800c2ac8b01eb85f2f
category: bugfix
bugzilla: NA
CVE: NA
-------------------
Currently arm64 doesn't initialize the primary CRNG in a (potentially) trusted manner as we only detect the presence of the RNG once secondary CPUs are up.
Now that the core RNG code distinguishes the early initialization of the primary CRNG, we can implement arch_get_random_seed_long_early() to support this.
This patch does so.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20200210130015.17664-4-mark.rutland@arm.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/archrandom.h | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 3fe02da70004..fc1594a0710e 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -4,6 +4,8 @@
 
 #ifdef CONFIG_ARCH_RANDOM
 
+#include <linux/bug.h>
+#include <linux/kernel.h>
 #include <linux/random.h>
 #include <asm/cpufeature.h>
 
@@ -66,6 +68,18 @@ static inline bool __init __early_cpu_has_rndr(void)
 	return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
 }
 
+static inline bool __init __must_check
+arch_get_random_seed_long_early(unsigned long *v)
+{
+	WARN_ON(system_state != SYSTEM_BOOTING);
+
+	if (!__early_cpu_has_rndr())
+		return false;
+
+	return __arm64_rndr(v);
+}
+#define arch_get_random_seed_long_early arch_get_random_seed_long_early
+
 #else
 
 static inline bool __arm64_rndr(unsigned long *v) { return false; }
From: Linus Torvalds <torvalds@linux-foundation.org>

mainline inclusion
from mainline-v5.8
commit 585524081ecdcde1c719e63916c514866d898217
category: bugfix
bugzilla: NA
CVE: NA
-------------------
This is hopefully the final piece of the crazy puzzle with random.h dependencies.
And by "hopefully" I obviously mean "Linus is a hopeless optimist".
Reported-and-tested-by: Daniel Díaz <daniel.diaz@linaro.org>
Acked-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Conflicts:
	arch/arm64/kernel/kaslr.c

Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/archrandom.h | 1 -
 arch/arm64/kernel/kaslr.c           | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index fc1594a0710e..44209f6146aa 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -6,7 +6,6 @@
 
 #include <linux/bug.h>
 #include <linux/kernel.h>
-#include <linux/random.h>
 #include <asm/cpufeature.h>
 
 static inline bool __arm64_rndr(unsigned long *v)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 291f53f29a4d..ab629f564043 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -13,8 +13,8 @@
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 #include <linux/types.h>
+#include <linux/random.h>
 
-#include <asm/archrandom.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kernel-pgtable.h>
From: Mark Rutland <mark.rutland@arm.com>

mainline inclusion
from mainline-v5.6
commit 253d3194c2b58152fe830fd27c2fd83ebc6fe5ee
category: bugfix
bugzilla: NA
CVE: NA
-------------------
Some architectures (e.g. arm64) can have heterogeneous CPUs, and the boot CPU may be able to provide entropy while secondary CPUs cannot. On such systems, arch_get_random_long() and arch_get_random_seed_long() will fail unless support for RNG instructions has been detected on all CPUs. This prevents the boot CPU from being able to provide (potentially) trusted entropy when seeding the primary CRNG.
To make it possible to seed the primary CRNG from the boot CPU without adversely affecting the runtime versions of arch_get_random_long() and arch_get_random_seed_long(), this patch adds new early versions of the functions used when initializing the primary CRNG.
Default implementations are provided atop of the existing arch_get_random_long() and arch_get_random_seed_long() so that only architectures with such constraints need to provide the new helpers.
There should be no functional change as a result of this patch.
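To make the intended calling pattern concrete, a boot-time consumer would try the seed variant first, fall back to the plain variant, and credit the architecture only if every word was architecturally provided. The sketch below is illustrative only, loosely modelled on the primary CRNG seeding path (the drivers/char/random.c hunk is not carried in this backport, per the conflicts noted below); the function name and pool layout are hypothetical.

static bool __init seed_pool_from_arch_early(unsigned long *pool, int nwords)
{
	bool all_arch = true;
	unsigned long rv;
	int i;

	for (i = 0; i < nwords; i++) {
		/* Prefer a true seed, then a plain architectural random value. */
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			/* Last resort: a timestamp; do not credit the arch. */
			rv = random_get_entropy();
			all_arch = false;
		}
		pool[i] ^= rv;
	}

	return all_arch;
}

A true return value is what lets the caller treat the CRNG as seeded from a (potentially) trusted source, which is the property the arm64 implementation elsewhere in this series relies on.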
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Link: https://lore.kernel.org/r/20200210130015.17664-3-mark.rutland@arm.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>

Conflicts:
	drivers/char/random.c
	include/linux/random.h

Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/random.h | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/include/linux/random.h b/include/linux/random.h
index 37209b3b22ae..efcbf8de6bfb 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -7,6 +7,8 @@
 #ifndef _LINUX_RANDOM_H
 #define _LINUX_RANDOM_H
 
+#include <linux/bug.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/once.h>
 
@@ -142,4 +144,24 @@ static inline bool arch_has_random_seed(void)
 }
 #endif
 
+/*
+ * Called from the boot CPU during startup; not valid to call once
+ * secondary CPUs are up and preemption is possible.
+ */
+#ifndef arch_get_random_seed_long_early
+static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
+{
+	WARN_ON(system_state != SYSTEM_BOOTING);
+	return arch_get_random_seed_long(v);
+}
+#endif
+
+#ifndef arch_get_random_long_early
+static inline bool __init arch_get_random_long_early(unsigned long *v)
+{
+	WARN_ON(system_state != SYSTEM_BOOTING);
+	return arch_get_random_long(v);
+}
+#endif
+
 #endif /* _LINUX_RANDOM_H */
From: Guenter Roeck <linux@roeck-us.net>

mainline inclusion
from mainline-v5.8
commit 9bceb80b3cc483e6763c39a4928402fa82815d3e
category: bugfix
bugzilla: NA
CVE: NA
-------------------
Commit 585524081ecd ("random: random.h should include archrandom.h, not the other way around") tries to fix a problem with recursive inclusion of linux/random.h and asm/archrandom.h on arm64. Unfortunately, this results in the following compile error if CONFIG_ARCH_RANDOM is disabled.
arch/arm64/kernel/kaslr.c: In function 'kaslr_early_init':
arch/arm64/kernel/kaslr.c:128:6: error: implicit declaration of function '__early_cpu_has_rndr'; did you mean '__early_pfn_to_nid'? [-Werror=implicit-function-declaration]
  if (__early_cpu_has_rndr()) {
      ^~~~~~~~~~~~~~~~~~~~
      __early_pfn_to_nid
arch/arm64/kernel/kaslr.c:131:7: error: implicit declaration of function '__arm64_rndr' [-Werror=implicit-function-declaration]
   if (__arm64_rndr(&raw))
       ^~~~~~~~~~~~
The problem is that asm/archrandom.h is only included from linux/random.h if CONFIG_ARCH_RANDOM is enabled. If it is not, __arm64_rndr() and __early_cpu_has_rndr() are undeclared, causing the build failure.
Use arch_get_random_seed_long_early() instead of arm64 specific functions to solve the problem.
Reported-by: Qian Cai <cai@lca.pw>
Fixes: 585524081ecd ("random: random.h should include archrandom.h, not the other way around")
Cc: Qian Cai <cai@lca.pw>
Cc: Mark Brown <broonie@kernel.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/kernel/kaslr.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index ab629f564043..5db9228e555c 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -90,6 +90,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	void *fdt;
 	u64 seed, offset, mask, module_range;
 	const u8 *cmdline, *str;
+	unsigned long raw;
 	int size;
 
 	/*
@@ -128,15 +129,12 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	}
 
 	/*
-	 * Mix in any entropy obtainable architecturally, open coded
-	 * since this runs extremely early.
+	 * Mix in any entropy obtainable architecturally if enabled
+	 * and supported.
 	 */
-	if (__early_cpu_has_rndr()) {
-		unsigned long raw;
-
-		if (__arm64_rndr(&raw))
-			seed ^= raw;
-	}
+	if (arch_get_random_seed_long_early(&raw))
+		seed ^= raw;
 
 	if (!seed) {
 		kaslr_status = KASLR_DISABLED_NO_SEED;
hulk inclusion
category: feature
bugzilla: NA
CVE: NA
-------------------
set default value of CONFIG_ARCH_RANDOM
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/configs/euleros_defconfig   | 5 +++++
 arch/arm64/configs/hulk_defconfig      | 5 +++++
 arch/arm64/configs/openeuler_defconfig | 5 +++++
 3 files changed, 15 insertions(+)

diff --git a/arch/arm64/configs/euleros_defconfig b/arch/arm64/configs/euleros_defconfig
index db84fb568dfd..185b9e5be848 100644
--- a/arch/arm64/configs/euleros_defconfig
+++ b/arch/arm64/configs/euleros_defconfig
@@ -480,6 +480,11 @@ CONFIG_ARM64_CNP=n
 #
 # CONFIG_ARM64_TLB_RANGE is not set
 
+#
+# ARMv8.5 architectural features
+#
+# CONFIG_ARCH_RANDOM is not set
+
 #
 # Boot options
 #
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig
index fe2006e0eaaa..8087f280632e 100644
--- a/arch/arm64/configs/hulk_defconfig
+++ b/arch/arm64/configs/hulk_defconfig
@@ -494,6 +494,11 @@ CONFIG_AS_HAS_PAC=y
 #
 CONFIG_ARM64_TLB_RANGE=y
 
+#
+# ARMv8.5 architectural features
+#
+CONFIG_ARCH_RANDOM=y
+
 #
 # Boot options
 #
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 711a76c3e83a..b02f45242358 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -490,6 +490,11 @@ CONFIG_AS_HAS_PAC=y
 #
 CONFIG_ARM64_TLB_RANGE=y
 
+#
+# ARMv8.5 architectural features
+#
+CONFIG_ARCH_RANDOM=y
+
 #
 # Boot options
 #