
From: Mark Brown <broonie@kernel.org>

kunpeng inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I90N2C
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/broonie/misc.git/commit/?h=a...

----------------------------------------------------------------------

FEAT_NMI is not yet useful to guests pending implementation of vGIC
support. Mask out the feature from the ID register and prevent guests
creating state in ALLINT.ALLINT by activating the trap on write provided
in HCRX_EL2.TALLINT when they are running. There is no trap available
for reads from ALLINT.

We do not need to check for FEAT_HCRX since it is mandatory since v8.7
and FEAT_NMI is a v8.8 feature.

Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Jie Liu <liujie375@h-partners.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/cpufeature.h | 21 +++++++++++++++++++++
 arch/arm64/include/asm/sysreg.h     |  9 +++++++++
 arch/arm64/kvm/hyp/switch.c         |  7 +++++++
 arch/arm64/kvm/sys_regs.c           |  2 ++
 4 files changed, 39 insertions(+)

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index ae5ac922543a..7892e0a8832f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -402,6 +402,11 @@ static __always_inline bool __cpus_have_const_cap(int num)
 	return static_branch_unlikely(&cpu_hwcap_keys[num]);
 }
 
+static __always_inline bool system_capabilities_finalized(void)
+{
+	return static_branch_likely(&arm64_const_caps_ready);
+}
+
 static inline bool cpus_have_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS)
@@ -417,6 +422,22 @@ static __always_inline bool cpus_have_const_cap(int num)
 	return cpus_have_cap(num);
 }
 
+/*
+ * Test for a capability without a runtime check.
+ *
+ * Before capabilities are finalized, this will BUG().
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
+static __always_inline bool cpus_have_final_cap(int num)
+{
+	if (system_capabilities_finalized())
+		return __cpus_have_const_cap(num);
+	else
+		BUG();
+}
+
 static inline void cpus_set_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS) {
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 3a1b7ba00245..8c869fd28fc5 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -220,6 +220,8 @@
 #define SYS_PAR_EL1_F		BIT(0)
 #define SYS_PAR_EL1_FST		GENMASK(6, 1)
 
+#define ID_AA64PFR1_NMI_MASK	GENMASK(39, 36)
+#define HCRX_EL2_TALLINT	BIT(6)
 #define HCRX_EL2_TALLINT_MASK	GENMASK(6, 6)
 
 /*** Statistical Profiling Extension ***/
@@ -888,6 +890,13 @@
 		write_sysreg(__scs_new, sysreg);			\
 } while (0)
 
+#define sysreg_clear_set_s(sysreg, clear, set) do {			\
+	u64 __scs_val = read_sysreg_s(sysreg);				\
+	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);		\
+	if (__scs_new != __scs_val)					\
+		write_sysreg_s(__scs_new, sysreg);			\
+} while (0)
+
 #endif
 
 #endif	/* __ASM_SYSREG_H */
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 768983bd2326..624e5c83a497 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -87,11 +87,18 @@ static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
 	 */
 	write_sysreg(0, pmselr_el0);
 	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+
+	if (cpus_have_final_cap(ARM64_HAS_NMI))
+		sysreg_clear_set_s(SYS_HCRX_EL2, 0, HCRX_EL2_TALLINT);
+
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
 static void __hyp_text __deactivate_traps_common(void)
 {
+	if (cpus_have_final_cap(ARM64_HAS_NMI))
+		sysreg_clear_set_s(SYS_HCRX_EL2, HCRX_EL2_TALLINT, 0);
+
 	write_sysreg(0, hstr_el2);
 	write_sysreg(0, pmuserenr_el0);
 }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 605222ed2943..4fad598eb3dd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1095,6 +1095,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
 			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
 			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
+	} else if (id == SYS_ID_AA64PFR1_EL1) {
+		val &= ~ID_AA64PFR1_NMI_MASK;
 	}
 
 	return val;
-- 
2.25.1
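
Not part of the patch: a minimal standalone C sketch of the two behaviours added above, using plain variables as hypothetical stand-ins for HCRX_EL2 and ID_AA64PFR1_EL1 so the sysreg_clear_set_s()-style read-modify-write and the read_id_reg()-style field masking can be checked off-target. Names mirror the patch; the "registers" here are ordinary memory.

/* Standalone sketch only; the real registers are hypothetical variables here. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1ULL << (n))
#define GENMASK(h, l)		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define HCRX_EL2_TALLINT	BIT(6)
#define ID_AA64PFR1_NMI_MASK	GENMASK(39, 36)

/* Read-modify-write helper mirroring the semantics of sysreg_clear_set_s(). */
static void clear_set(uint64_t *reg, uint64_t clear, uint64_t set)
{
	uint64_t new = (*reg & ~clear) | set;

	if (new != *reg)	/* only "write back" when the value changes */
		*reg = new;
}

int main(void)
{
	uint64_t hcrx_el2 = 0;
	uint64_t id_aa64pfr1 = ID_AA64PFR1_NMI_MASK;	/* pretend FEAT_NMI is implemented */

	/* __activate_traps_common()-style path: trap guest writes to ALLINT. */
	clear_set(&hcrx_el2, 0, HCRX_EL2_TALLINT);

	/* __deactivate_traps_common()-style path: stop trapping for the host. */
	clear_set(&hcrx_el2, HCRX_EL2_TALLINT, 0);

	/* read_id_reg()-style masking: hide the NMI field from the guest's view. */
	id_aa64pfr1 &= ~ID_AA64PFR1_NMI_MASK;

	printf("HCRX_EL2=%#llx ID_AA64PFR1=%#llx\n",
	       (unsigned long long)hcrx_el2, (unsigned long long)id_aa64pfr1);
	return 0;
}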