virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8WMFU
CVE: NA
--------------------------------
Add a Kconfig option, CONFIG_PARAVIRT_SCHED, for the pv-sched feature, and
guard the guest-side PV sched setup as well as the KVM host-side PV sched
hypercall handling with it, so the feature can be compiled out when the
option is disabled.
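To enable the feature, the guest kernel needs the new option in addition to
PARAVIRT, e.g. via a config fragment (a minimal illustration, assuming the
usual arm64 defconfig workflow):

	CONFIG_PARAVIRT=y
	CONFIG_PARAVIRT_SCHED=y

With CONFIG_PARAVIRT_SCHED=n, pv_sched_init(), the pvsched per-CPU region
and the ARM_SMCCC_HV_PV_SCHED_* hypercall handling are all compiled out.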
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/Kconfig                |  7 ++++
 arch/arm64/include/asm/kvm_host.h |  4 +++
 arch/arm64/include/asm/paravirt.h |  2 ++
 arch/arm64/kernel/paravirt.c      | 54 ++++++++++++++++---------------
 arch/arm64/kvm/arm.c              | 21 +++++++++---
 arch/arm64/kvm/handle_exit.c      |  2 ++
 arch/arm64/kvm/hypercalls.c       |  4 +++
 arch/arm64/kvm/pvsched.c          |  2 ++
 8 files changed, 66 insertions(+), 30 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 971cf9d54101..145d85fc37fd 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1561,6 +1561,13 @@ config PARAVIRT_SPINLOCKS
If you are unsure how to answer this question, answer Y.
+config PARAVIRT_SCHED
+	bool "Paravirtualization layer for sched"
+	depends on PARAVIRT
+	help
+
+	  If you are unsure how to answer this question, answer Y.
+
 config PARAVIRT_TIME_ACCOUNTING
 	bool "Paravirtual steal time accounting"
 	select PARAVIRT
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bb7549d515db..93782ef4f25e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -596,11 +596,13 @@ struct kvm_vcpu_arch {
 		gpa_t base;
 	} steal;
+#ifdef CONFIG_PARAVIRT_SCHED
 	/* Guest PV sched state */
 	struct {
 		bool pv_unhalted;
 		gpa_t base;
 	} pvsched;
+#endif
 	/* Per-vcpu CCSIDR override or NULL */
 	u32 *ccsidr;
@@ -1057,6 +1059,7 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
 	return (vcpu_arch->steal.base != INVALID_GPA);
 }
+#ifdef CONFIG_PARAVIRT_SCHED
 long kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu);
 void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted);
 long kvm_pvsched_kick_vcpu(struct kvm_vcpu *vcpu);
@@ -1070,6 +1073,7 @@ static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
 {
 	return (vcpu_arch->pvsched.base != INVALID_GPA);
 }
+#endif
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index a29eeffa49aa..1ce7187eed48 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -20,7 +20,9 @@ static inline u64 paravirt_steal_clock(int cpu)
int __init pv_time_init(void);
+#ifdef CONFIG_PARAVIRT_SCHED
 int __init pv_sched_init(void);
+#endif
 __visible bool __native_vcpu_is_preempted(int cpu);
 DECLARE_STATIC_CALL(pv_vcpu_preempted, __native_vcpu_is_preempted);
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 73a8b9886775..126dff610e07 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -180,7 +180,7 @@ int __init pv_time_init(void)
 	return 0;
 }
-
+#ifdef CONFIG_PARAVIRT_SCHED
 DEFINE_PER_CPU(struct pvsched_vcpu_state, pvsched_vcpu_region) __aligned(64);
 EXPORT_PER_CPU_SYMBOL(pvsched_vcpu_region);
@@ -263,6 +263,33 @@ static bool has_kvm_pvsched(void)
 	return (res.a0 == SMCCC_RET_SUCCESS);
 }
+int __init pv_sched_init(void)
+{
+	int ret;
+
+	if (is_hyp_mode_available())
+		return 0;
+
+	if (!has_kvm_pvsched()) {
+		pr_warn("PV sched is not available\n");
+		return 0;
+	}
+
+	ret = kvm_arm_init_pvsched();
+	if (ret)
+		return ret;
+
+	static_call_update(pv_vcpu_preempted, kvm_vcpu_is_preempted);
+	pr_info("using PV sched preempted\n");
+
+	pv_qspinlock_init();
+
+	return 0;
+}
+
+early_initcall(pv_sched_init);
+#endif /* CONFIG_PARAVIRT_SCHED */
+
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 static bool arm_pvspin;
@@ -336,28 +363,3 @@ static __init int arm_parse_pvspin(char *arg)
 }
 early_param("arm_pvspin", arm_parse_pvspin);
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
-
-int __init pv_sched_init(void)
-{
-	int ret;
-
-	if (is_hyp_mode_available())
-		return 0;
-
-	if (!has_kvm_pvsched()) {
-		pr_warn("PV sched is not available\n");
-		return 0;
-	}
-
-	ret = kvm_arm_init_pvsched();
-	if (ret)
-		return ret;
-
-	static_call_update(pv_vcpu_preempted, kvm_vcpu_is_preempted);
-	pr_info("using PV sched preempted\n");
-
-	pv_qspinlock_init();
-
-	return 0;
-}
-early_initcall(pv_sched_init);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index edffe6c486de..d399e4fae356 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -408,7 +408,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
kvm_arm_pvtime_vcpu_init(&vcpu->arch);
+#ifdef CONFIG_PARAVIRT_SCHED
 	kvm_arm_pvsched_vcpu_init(&vcpu->arch);
+#endif
vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
@@ -497,10 +499,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
 		vcpu_set_on_unsupported_cpu(vcpu);
+	kvm_tlbi_dvmbm_vcpu_load(vcpu);
+
+#ifdef CONFIG_PARAVIRT_SCHED
 	if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
 		kvm_update_pvsched_preempted(vcpu, 0);
-
-	kvm_tlbi_dvmbm_vcpu_load(vcpu);
+#endif
 }
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -517,10 +521,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu_clear_on_unsupported_cpu(vcpu);
 	vcpu->cpu = -1;
+	kvm_tlbi_dvmbm_vcpu_put(vcpu);
+
+#ifdef CONFIG_PARAVIRT_SCHED
 	if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
 		kvm_update_pvsched_preempted(vcpu, 1);
-
-	kvm_tlbi_dvmbm_vcpu_put(vcpu);
+#endif
 }
 static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
@@ -598,10 +604,15 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
 	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
+#ifdef CONFIG_PARAVIRT_SCHED
 	bool pv_unhalted = v->arch.pvsched.pv_unhalted;
 	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v) || pv_unhalted)
 		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
+#else
+	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
+		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
+#endif
 }
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
@@ -1385,7 +1396,9 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
spin_unlock(&vcpu->arch.mp_state_lock);
+#ifdef CONFIG_PARAVIRT_SCHED
 	kvm_arm_pvsched_vcpu_init(&vcpu->arch);
+#endif
 	return 0;
 }
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index b9a44c3bebb7..f4671ae23b1f 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -121,7 +121,9 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
 	} else {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;
+#ifdef CONFIG_PARAVIRT_SCHED
 		vcpu->arch.pvsched.pv_unhalted = false;
+#endif
 	}
 	if (esr & ESR_ELx_WFx_ISS_WFxT) {
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 46def0572461..2e7a1d06e6ff 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -332,9 +332,11 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
 				      &smccc_feat->std_hyp_bmap))
 				val[0] = SMCCC_RET_SUCCESS;
 			break;
+#ifdef CONFIG_PARAVIRT_SCHED
 		case ARM_SMCCC_HV_PV_SCHED_FEATURES:
 			val[0] = SMCCC_RET_SUCCESS;
 			break;
+#endif
 		}
 		break;
 	case ARM_SMCCC_HV_PV_TIME_FEATURES:
@@ -363,6 +365,7 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
 	case ARM_SMCCC_TRNG_RND32:
 	case ARM_SMCCC_TRNG_RND64:
 		return kvm_trng_call(vcpu);
+#ifdef CONFIG_PARAVIRT_SCHED
 	case ARM_SMCCC_HV_PV_SCHED_FEATURES:
 		val[0] = kvm_hypercall_pvsched_features(vcpu);
 		break;
@@ -380,6 +383,7 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
 	case ARM_SMCCC_HV_PV_SCHED_KICK_CPU:
 		val[0] = kvm_pvsched_kick_vcpu(vcpu);
 		break;
+#endif
 	default:
 		return kvm_psci_call(vcpu);
 	}
diff --git a/arch/arm64/kvm/pvsched.c b/arch/arm64/kvm/pvsched.c
index bcfd4760b491..eae6ad7b6c5d 100644
--- a/arch/arm64/kvm/pvsched.c
+++ b/arch/arm64/kvm/pvsched.c
@@ -4,6 +4,7 @@
  * Author: Zengruan Ye <yezengruan@huawei.com>
  */
+#ifdef CONFIG_PARAVIRT_SCHED
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
@@ -77,3 +78,4 @@ long kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
 	return val;
 }
+#endif /* CONFIG_PARAVIRT_SCHED */