
From: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>

mainline inclusion
from mainline-v6.15-rc1
commit e3121298c7fcaf488df8e61f50fced9a741c4c44
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IBN3WI
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h...

----------------------------------------------------------------------

These changes lay the groundwork for adding support for guest kernels,
allowing them to leverage target CPU implementations provided by the
VMM.

No functional changes intended.

Suggested-by: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Sebastian Ott <sebott@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20250221140229.12588-2-shameerali.kolothum.thodi@h...
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
 arch/arm64/include/asm/cache.h       |  2 +-
 arch/arm64/include/asm/cputype.h     | 28 ++++++++++++++--------------
 arch/arm64/kernel/cpu_errata.c       | 25 ++++++++++++++-----------
 arch/arm64/kernel/cpufeature.c       |  9 ++++-----
 arch/arm64/kernel/image-vars.h       |  4 +++-
 arch/arm64/kernel/proton-pack.c      | 20 ++++++++++----------
 arch/arm64/kvm/vgic/vgic-v3.c        |  2 +-
 drivers/clocksource/arm_arch_timer.c |  2 +-
 drivers/perf/arm_pmuv3.c             |  2 +-
 9 files changed, 49 insertions(+), 45 deletions(-)

diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 8455df351ef8..01dabe4c41fa 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -124,7 +124,7 @@ static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
                 MIDR_REV(MIDR_HISI_LINXICORE9100, 1, 0),
                 { /* sentinel */ }
         };
-        if (is_midr_in_range_list(read_cpuid_id(), idc_support_list))
+        if (is_midr_in_range_list(idc_support_list))
                 ctr |= BIT(CTR_EL0_IDC_SHIFT);
 #endif
 
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 387a4af3f4e4..d6744cc36d60 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -255,6 +255,16 @@
 
 #define read_cpuid(reg)                 read_sysreg_s(SYS_ ## reg)
 
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid() directly.
+ */
+static inline u32 __attribute_const__ read_cpuid_id(void)
+{
+        return read_cpuid(MIDR_EL1);
+}
+
 /*
  * Represent a range of MIDR values for a given CPU model and a
  * range of variant/revision values.
@@ -290,31 +300,21 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
         return _model == model && rv >= rv_min && rv <= rv_max;
 }
 
-static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
+static inline bool is_midr_in_range(struct midr_range const *range)
 {
-        return midr_is_cpu_model_range(midr, range->model,
+        return midr_is_cpu_model_range(read_cpuid_id(), range->model,
                                        range->rv_min, range->rv_max);
 }
 
 static inline bool
-is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
+is_midr_in_range_list(struct midr_range const *ranges)
 {
         while (ranges->model)
-                if (is_midr_in_range(midr, ranges++))
+                if (is_midr_in_range(ranges++))
                         return true;
         return false;
 }
 
-/*
- * The CPU ID never changes at run time, so we might as well tell the
- * compiler that it's constant. Use this function to read the CPU ID
- * rather than directly reading processor_id or read_cpuid() directly.
- */
-static inline u32 __attribute_const__ read_cpuid_id(void)
-{
-        return read_cpuid(MIDR_EL1);
-}
-
 static inline u64 __attribute_const__ read_cpuid_mpidr(void)
 {
         return read_cpuid(MPIDR_EL1);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 25bf46c7ce51..5617f4767c2f 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -20,30 +20,34 @@
 #endif
 
 static bool __maybe_unused
-is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
+                         u32 midr, u32 revidr)
 {
         const struct arm64_midr_revidr *fix;
-        u32 midr = read_cpuid_id(), revidr;
-
-        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-        if (!is_midr_in_range(midr, &entry->midr_range))
+        if (!is_midr_in_range(&entry->midr_range))
                 return false;
 
         midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
-        revidr = read_cpuid(REVIDR_EL1);
         for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
                 if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
                         return false;
-
         return true;
 }
 
+static bool __maybe_unused
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+{
+        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+        return __is_affected_midr_range(entry, read_cpuid_id(),
+                                        read_cpuid(REVIDR_EL1));
+}
+
 static bool __maybe_unused
 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                             int scope)
 {
         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+        return is_midr_in_range_list(entry->midr_range_list);
 }
 
 static bool __maybe_unused
@@ -80,7 +84,7 @@ hisilicon_1980005_match(const struct arm64_cpu_capabilities *entry,
                 { /* sentinel */ }
         };
 
-        return is_midr_in_range_list(read_cpuid_id(), idc_support_list);
+        return is_midr_in_range_list(idc_support_list);
 }
 
 static void
@@ -282,12 +286,11 @@ static bool __maybe_unused
 has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
                                 int scope)
 {
-        u32 midr = read_cpuid_id();
         bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
         const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
 
         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-        return is_midr_in_range(midr, &range) && has_dic;
+        return is_midr_in_range(&range) && has_dic;
 }
 
 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 672accbd5b4a..e898553f75f1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1687,8 +1687,7 @@ bool kaslr_requires_kpti(void)
         if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
                 extern const struct midr_range cavium_erratum_27456_cpus[];
 
-                if (is_midr_in_range_list(read_cpuid_id(),
-                                          cavium_erratum_27456_cpus))
+                if (is_midr_in_range_list(cavium_erratum_27456_cpus))
                         return false;
         }
 
@@ -1723,7 +1722,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
         char const *str = "kpti command line option";
         bool meltdown_safe;
 
-        meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+        meltdown_safe = is_midr_in_range_list(kpti_safe_list);
 
         /* Defer to CPU feature registers */
         if (has_cpuid_feature(entry, scope))
@@ -1900,7 +1899,7 @@ static bool cpu_has_broken_dbm(void)
                 {},
         };
 
-        return is_midr_in_range_list(read_cpuid_id(), cpus);
+        return is_midr_in_range_list(cpus);
 }
 
 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
@@ -2441,7 +2440,7 @@ static bool is_arch_xcall_xint_support(void)
                 { /* sentinel */ }
         };
 
-        if (is_midr_in_range_list(read_cpuid_id(), xcall_xint_cpus))
+        if (is_midr_in_range_list(xcall_xint_cpus))
                 return true;
 
         return false;
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index ef942592d767..badf98c98744 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -22,7 +22,9 @@ PROVIDE(__efistub_primary_entry         = primary_entry);
  * position independent manner
  */
 PROVIDE(__efistub_caches_clean_inval_pou = __pi_caches_clean_inval_pou);
-
+#ifdef CONFIG_HISILICON_ERRATUM_1980005
+PROVIDE(__efistub_is_midr_in_range_list = is_midr_in_range_list);
+#endif
 PROVIDE(__efistub__text                 = _text);
 PROVIDE(__efistub__end                  = _end);
 PROVIDE(__efistub___inittext_end        = __inittext_end);
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index fc03252859af..9f3f362b9aa7 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -172,7 +172,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
                 return SPECTRE_UNAFFECTED;
 
         /* Alternatively, we have a list of unaffected CPUs */
-        if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+        if (is_midr_in_range_list(spectre_v2_safe_list))
                 return SPECTRE_UNAFFECTED;
 
         return SPECTRE_VULNERABLE;
@@ -331,7 +331,7 @@ bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
         };
 
         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-        return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
+        return is_midr_in_range_list(spectre_v3a_unsafe_list);
 }
 
 void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
@@ -475,7 +475,7 @@ static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
                 { /* sentinel */ },
         };
 
-        if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+        if (is_midr_in_range_list(spectre_v4_safe_list))
                 return SPECTRE_UNAFFECTED;
 
         /* CPU features are detected first */
@@ -864,7 +864,7 @@ static bool is_spectre_bhb_safe(int scope)
         if (scope != SCOPE_LOCAL_CPU)
                 return all_safe;
 
-        if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
+        if (is_midr_in_range_list(spectre_bhb_safe_list))
                 return true;
 
         all_safe = false;
@@ -917,17 +917,17 @@ static u8 spectre_bhb_loop_affected(void)
                 {},
         };
 
-        if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k132_list))
+        if (is_midr_in_range_list(spectre_bhb_k132_list))
                 k = 132;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k38_list))
+        else if (is_midr_in_range_list(spectre_bhb_k38_list))
                 k = 38;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+        else if (is_midr_in_range_list(spectre_bhb_k32_list))
                 k = 32;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+        else if (is_midr_in_range_list(spectre_bhb_k24_list))
                 k = 24;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+        else if (is_midr_in_range_list(spectre_bhb_k11_list))
                 k = 11;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+        else if (is_midr_in_range_list(spectre_bhb_k8_list))
                 k = 8;
 
         return k;
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index ffed412e10a6..378cb6d370b1 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -630,7 +630,7 @@ static const struct midr_range broken_seis[] = {
 static bool vgic_v3_broken_seis(void)
 {
         return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
-                is_midr_in_range_list(read_cpuid_id(), broken_seis));
+                is_midr_in_range_list(broken_seis));
 }
 
 /**
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 867b83c37bdb..095238032ac0 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -893,7 +893,7 @@ static u64 __arch_timer_check_delta(void)
                 {},
         };
 
-        if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+        if (is_midr_in_range_list(broken_cval_midrs)) {
                 pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
                 return CLOCKSOURCE_MASK(31);
         }
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 29a659d5a273..b7d41c66bd2f 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -386,7 +386,7 @@ static bool armpmu_support_hisi_hw_metric(void)
          * which will cause kernel panic in virtual machine because of lack of
          * authority. Thus, this feature is banned for virtual machines.
          */
-        return is_midr_in_range_list(read_cpuid_id(), hip12_cpus) &&
+        return is_midr_in_range_list(hip12_cpus) &&
                is_kernel_in_hyp_mode();
 }
 #endif
-- 
2.33.0
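
Note for readers who want the interface change at a glance rather than from
the diff: the sketch below is a standalone, compilable illustration, not
kernel code. The midr_range layout and the model check are simplified
stand-ins, and read_cpuid_id() is stubbed with a made-up MIDR value; the real
helpers live in arch/arm64/include/asm/cputype.h.

/*
 * Standalone sketch (NOT kernel code): models how a caller of
 * is_midr_in_range_list() changes shape with this patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct midr_range {
        uint32_t model;         /* implementer/architecture/part number bits */
        uint32_t rv_min;        /* minimum variant/revision */
        uint32_t rv_max;        /* maximum variant/revision */
};

/* Stub for the MIDR_EL1 read; a fixed value purely for illustration. */
static uint32_t read_cpuid_id(void)
{
        return 0x410fd0c0;
}

/* Simplified model-only match; the kernel also checks variant/revision. */
static bool midr_matches(uint32_t midr, const struct midr_range *r)
{
        return (midr & 0xff0ffff0u) == r->model;
}

/* Old shape: every caller reads MIDR_EL1 itself and passes it in. */
static bool is_midr_in_range_list_old(uint32_t midr,
                                      const struct midr_range *ranges)
{
        for (; ranges->model; ranges++)
                if (midr_matches(midr, ranges))
                        return true;
        return false;
}

/* New shape (this patch): the helper reads the CPU id internally. */
static bool is_midr_in_range_list_new(const struct midr_range *ranges)
{
        for (; ranges->model; ranges++)
                if (midr_matches(read_cpuid_id(), ranges))
                        return true;
        return false;
}

int main(void)
{
        static const struct midr_range list[] = {
                { .model = 0x410fd0c0, .rv_min = 0, .rv_max = 0xf },
                { 0 /* sentinel */ }
        };

        printf("old-style match: %d\n",
               is_midr_in_range_list_old(read_cpuid_id(), list));
        printf("new-style match: %d\n", is_midr_in_range_list_new(list));
        return 0;
}

The two calls in main() show the point of the refactor: the old helper needs
the MIDR threaded through every caller, while the new one reads it in one
place, which is presumably the groundwork the commit message refers to for
later substituting a VMM-provided target CPU implementation for guests.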