From: Marc Zyngier <maz@kernel.org>
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I97WGU
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git/commit...
-----------------------------------------------------------
Even if the host isn't using NMIs, that isn't a reason to prevent the feature from being available to guests.
Decouple the two and let the capability be available irrespective of the host's level of NMI support.
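For illustration only (not part of the patch): after this change, host use of FEAT_NMI and guest availability are checked separately, roughly as in the sketch below. The *_sketch helpers are hypothetical names invented here; the real predicates are system_uses_nmi() and the ARM64_HAS_NMI capability touched in the diff.

  /* Hypothetical sketch: host use still requires CONFIG_ARM64_NMI and no pseudo-NMI. */
  static __always_inline bool host_uses_nmi_sketch(void)
  {
  	return IS_ENABLED(CONFIG_ARM64_NMI) &&
  	       cpus_have_const_cap(ARM64_USES_NMI) &&
  	       !system_uses_irq_prio_masking();
  }

  /* Hypothetical sketch: guest availability only needs the CPUs to implement FEAT_NMI. */
  static bool guest_nmi_available_sketch(void)
  {
  	return cpus_have_const_cap(ARM64_HAS_NMI);
  }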
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Xiang Chen <chenxiang66@hisilicon.com>
Signed-off-by: caijian <caijian11@h-partners.com>
---
 arch/arm64/include/asm/cpufeature.h |  3 ++-
 arch/arm64/kernel/cpufeature.c      | 13 +++++++++----
 arch/arm64/kvm/arm.c                |  2 +-
 arch/arm64/kvm/sys_regs.c           |  2 +-
 drivers/irqchip/irq-gic-v3.c        | 13 ++++++++++++-
 5 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 396ad4bcd8cf..450124238563 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -818,7 +818,8 @@ static __always_inline bool system_uses_irq_prio_masking(void)
 static __always_inline bool system_uses_nmi(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_NMI) &&
-	       cpus_have_const_cap(ARM64_USES_NMI);
+	       cpus_have_const_cap(ARM64_USES_NMI) &&
+	       !system_uses_irq_prio_masking();
 }
 
 static inline bool system_supports_mte(void)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 316c57b19c93..dccf8c3a23bb 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2189,20 +2189,24 @@ static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry
 }
 #endif
 
-#ifdef CONFIG_ARM64_NMI
 static bool use_nmi(const struct arm64_cpu_capabilities *entry, int scope)
 {
 	if (!has_cpuid_feature(entry, scope))
 		return false;
 
 	/*
+	 * NMI support was not enabled in the kernel, but can still be
+	 * used by guests. Let the world know.
+	 *
 	 * Having both real and pseudo NMIs enabled simultaneously is
 	 * likely to cause confusion. Since pseudo NMIs must be
 	 * enabled with an explicit command line option, if the user
 	 * has set that option on a system with real NMIs for some
 	 * reason assume they know what they're doing.
 	 */
-	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) {
+	if (!IS_ENABLED(CONFIG_ARM64_NMI))
+		pr_info("CONFIG_ARM64_NMI disabled, using NMIs for guests only\n");
+	else if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) {
 		pr_info("Pseudo NMI enabled, not using architected NMI\n");
 		return false;
 	}
@@ -2210,6 +2214,7 @@ static bool use_nmi(const struct arm64_cpu_capabilities *entry, int scope)
 	return true;
 }
 
+#ifdef CONFIG_ARM64_NMI
 static void nmi_enable(const struct arm64_cpu_capabilities *__unused)
 {
 	/*
@@ -2821,7 +2826,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
 	},
-#ifdef CONFIG_ARM64_NMI
 	{
 		.desc = "Non-maskable Interrupts present",
 		.capability = ARM64_HAS_NMI,
@@ -2843,9 +2847,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_width = 4,
 		.min_field_value = ID_AA64PFR1_EL1_NMI_IMP,
 		.matches = use_nmi,
+#ifdef CONFIG_ARM64_NMI
 		.cpu_enable = nmi_enable,
-	},
 #endif
+	},
 #ifdef CONFIG_ARM64_MPAM
 	{
 		.desc = "Memory Partitioning And Monitoring",
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9c6b4accb10b..c71ef028dde6 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -192,7 +192,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	/* The maximum number of VCPUs is limited by the host's GIC model */
 	kvm->max_vcpus = kvm_arm_default_max_vcpus();
 
-	if (system_uses_nmi() && !static_branch_unlikely(&vgic_v3_cpuif_trap))
+	if (cpus_have_const_cap(ARM64_HAS_NMI) && !static_branch_unlikely(&vgic_v3_cpuif_trap))
 		kvm->arch.pfr1_nmi = ID_AA64PFR1_EL1_NMI_IMP;
 
 	kvm_arm_init_hypercalls(kvm);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index aea39eab513f..2c05984415a4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1581,7 +1581,7 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
 
 	nmi = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR1_EL1_NMI_SHIFT);
 	if (nmi > ID_AA64PFR1_EL1_NMI_IMP ||
-	    (nmi && (!system_uses_nmi() || static_branch_unlikely(&vgic_v3_cpuif_trap))))
+	    (nmi && (!cpus_have_const_cap(ARM64_HAS_NMI) || static_branch_unlikely(&vgic_v3_cpuif_trap))))
 		return -EINVAL;
 
 	/* We can only differ with NMI, and anything else is an error */
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 7785faa6c7f7..8bb42ac8fb2a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -148,11 +148,21 @@ static inline bool has_v3_3_nmi(void)
 {
 	return gic_data.has_nmi && system_uses_nmi();
 }
+
+static bool system_is_nmi_capable(void)
+{
+	return gic_data.has_nmi && cpus_have_const_cap(ARM64_HAS_NMI);
+}
 #else
 static inline bool has_v3_3_nmi(void)
 {
 	return false;
 }
+
+static bool system_is_nmi_capable(void)
+{
+	return false;
+}
 #endif
 
 #ifdef CONFIG_VIRT_VTIMER_IRQ_BYPASS
@@ -2330,6 +2340,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
 
 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
 	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
+	gic_v3_kvm_info.has_nmi = system_is_nmi_capable();
 #ifdef CONFIG_VIRT_VTIMER_IRQ_BYPASS
 	gic_v3_kvm_info.has_vtimer = gic_data.rdists.has_vtimer;
 #endif
@@ -2681,7 +2692,7 @@ static void __init gic_acpi_setup_kvm_info(void)
 
 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
 	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
-	gic_v3_kvm_info.has_nmi = has_v3_3_nmi();
+	gic_v3_kvm_info.has_nmi = system_is_nmi_capable();
 #ifdef CONFIG_VIRT_VTIMER_IRQ_BYPASS
 	gic_v3_kvm_info.has_vtimer = gic_data.rdists.has_vtimer;
 #endif