
From: Yanan Wang <wangyanan55@huawei.com>

virt inclusion
category: performance
bugzilla: https://gitee.com/openeuler/kernel/issues/IBNSBL

--------------------------------

pv_preempted cannot guarantee a performance improvement in every
scenario, so add a module parameter to dynamically enable/disable
pv_preempted when it is not needed.

Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Signed-off-by: Dongxu Sun <sundongxu3@huawei.com>
---
 arch/arm64/include/asm/kvm_host.h |  5 ++++-
 arch/arm64/kvm/arm.c              | 27 ++++++++++++++++++++++++---
 arch/arm64/kvm/pvsched.c          |  2 ++
 3 files changed, 30 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f171ab3d0d37..aa69338f6628 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -412,6 +412,7 @@ struct kvm_vcpu_arch {
         /* Guest PV sched state */
         struct {
                 bool pv_unhalted;
+                bool preempted;
                 gpa_t base;
         } pvsched;
 
@@ -645,12 +646,14 @@ long kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu);
 void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted);
 long kvm_pvsched_kick_vcpu(struct kvm_vcpu *vcpu);
 
+extern bool pv_preempted_enable;
 static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
 {
         vcpu_arch->pvsched.base = GPA_INVALID;
+        vcpu_arch->pvsched.preempted = false;
 }
 
-static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
+static inline bool kvm_arm_is_pvsched_valid(struct kvm_vcpu_arch *vcpu_arch)
 {
         return (vcpu_arch->pvsched.base != GPA_INVALID);
 }
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 240edaa9eb50..d6962ea83e18 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -84,6 +84,15 @@ unsigned int twedel = 0;
 module_param(twedel, uint, S_IRUGO | S_IWUSR);
 #endif
 
+static const struct kernel_param_ops pv_preempted_enable_ops = {
+        .set = param_set_bool,
+        .get = param_get_bool,
+};
+
+bool pv_preempted_enable = true;
+MODULE_PARM_DESC(pv_preempted_enable, "bool");
+module_param_cb(pv_preempted_enable, &pv_preempted_enable_ops, &pv_preempted_enable, S_IRUGO | S_IWUSR);
+
 static int vcpu_req_reload_wfi_traps(const char *val, const struct kernel_param *kp);
 
 static const struct kernel_param_ops force_wfi_trap_ops = {
@@ -575,8 +584,20 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         if (vcpu_has_ptrauth(vcpu))
                 vcpu_ptrauth_disable(vcpu);
 
-        if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
-                kvm_update_pvsched_preempted(vcpu, 0);
+        /*
+         * When pv_preempted is changed from enabled to disabled, preempted
+         * state will not be updated in kvm_arch_vcpu_put/load. So we must
+         * update the preempted state to 0 for every vCPU in case some vCPUs'
+         * preempted state will always be 1.
+         */
+        if (kvm_arm_is_pvsched_valid(&vcpu->arch)) {
+                if (pv_preempted_enable)
+                        kvm_update_pvsched_preempted(vcpu, 0);
+                else {
+                        if (vcpu->arch.pvsched.preempted)
+                                kvm_update_pvsched_preempted(vcpu, 0);
+                }
+        }
 
 #ifdef CONFIG_KVM_HISI_VIRT
         kvm_hisi_dvmbm_load(vcpu);
@@ -600,7 +621,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
         vcpu->cpu = -1;
 
-        if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
+        if (kvm_arm_is_pvsched_valid(&vcpu->arch) && pv_preempted_enable)
                 kvm_update_pvsched_preempted(vcpu, 1);
 
 #ifdef CONFIG_KVM_HISI_VIRT
diff --git a/arch/arm64/kvm/pvsched.c b/arch/arm64/kvm/pvsched.c
index dc1768815467..9693415226d1 100644
--- a/arch/arm64/kvm/pvsched.c
+++ b/arch/arm64/kvm/pvsched.c
@@ -34,6 +34,8 @@ void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted)
 
         srcu_read_unlock(&kvm->srcu, idx);
         pagefault_enable();
+
+        vcpu->arch.pvsched.preempted = !!preempted;
 }
 
 long kvm_pvsched_kick_vcpu(struct kvm_vcpu *vcpu)
-- 
2.33.0
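
For reference, the new knob is registered with module_param_cb() in
arch/arm64/kvm/arm.c, so it should be adjustable both at boot and at
runtime. The exact sysfs path depends on how KVM is built; the lines
below assume the usual "kvm" module name and are only a sketch, not
part of the patch:

    # runtime toggle via sysfs (assumed path)
    echo 0 > /sys/module/kvm/parameters/pv_preempted_enable
    echo 1 > /sys/module/kvm/parameters/pv_preempted_enable

    # boot-time setting on the kernel command line (assumed prefix)
    kvm.pv_preempted_enable=0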