From: Quan Zhou <zhouquan65@huawei.com>
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
----------------------------------------------------
Introduce dvm_cpumask and dvm_lock in struct kvm_arch. dvm_cpumask stores the union of all vcpus' cpus_ptr and is used as the TLBI broadcast range. dvm_lock ensures exclusive manipulation of dvm_cpumask.
In vcpu_load, decide whether the subsequent update is needed by checking whether the vcpu's cpus_ptr has changed since it was last loaded; only then is the union recomputed under dvm_lock, and dvm_cpumask is updated only when the result actually differs.
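In outline, the load-time update works roughly as follows (a condensed sketch of the hunk added to kvm_hisi_dvmbm_load() below, with the out_unlock label folded into an if; not the literal diff):

	cpumask_copy(vcpu->arch.cpus_ptr, current->cpus_ptr);
	if (cpumask_equal(vcpu->arch.cpus_ptr, vcpu->arch.pre_cpus_ptr))
		return;				/* affinity unchanged, nothing to do */

	spin_lock(&kvm->arch.dvm_lock);
	cpumask_clear(&mask);
	kvm_for_each_vcpu(i, tmp, kvm)		/* union of all vcpus' cpus_ptr */
		cpumask_or(&mask, &mask, tmp->arch.cpus_ptr);
	if (!cpumask_equal(kvm->arch.dvm_cpumask, &mask))
		cpumask_copy(kvm->arch.dvm_cpumask, &mask);
	spin_unlock(&kvm->arch.dvm_lock);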
Signed-off-by: Quan Zhou <zhouquan65@huawei.com>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Nianyao Tang <tangnianyao@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/include/asm/kvm_host.h    |  5 +++
 arch/arm64/kvm/arm.c                 | 11 ++++++
 arch/arm64/kvm/hisilicon/hisi_virt.c | 53 ++++++++++++++++++++++++++++
 arch/arm64/kvm/hisilicon/hisi_virt.h |  2 ++
 4 files changed, 71 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 9e7b2081e11e..327327fcd444 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -279,6 +279,11 @@ struct kvm_arch {
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
+
+#ifdef CONFIG_KVM_HISI_VIRT
+	spinlock_t dvm_lock;
+	cpumask_t *dvm_cpumask;	/* Union of all vcpu's cpus_ptr */
+#endif
 };
 
 struct kvm_vcpu_fault_info {
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 6d22d5e3626c..6bec6043d173 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -147,6 +147,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
	int ret;
 
+#ifdef CONFIG_KVM_HISI_VIRT
+	ret = kvm_hisi_init_dvmbm(kvm);
+	if (ret)
+		return ret;
+#endif
+
	mutex_init(&kvm->arch.config_lock);
 
 #ifdef CONFIG_LOCKDEP
@@ -158,6 +164,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 #endif
 
	ret = kvm_share_hyp(kvm, kvm + 1);
+
	if (ret)
		return ret;
 
@@ -207,6 +214,10 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
  */
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_KVM_HISI_VIRT
+	kvm_hisi_destroy_dvmbm(kvm);
+#endif
+
	bitmap_free(kvm->arch.pmu_filter);
	free_cpumask_var(kvm->arch.supported_cpus);
 
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c
index 2c79e7f28ca5..3dd2a747b780 100644
--- a/arch/arm64/kvm/hisilicon/hisi_virt.c
+++ b/arch/arm64/kvm/hisilicon/hisi_virt.c
@@ -198,10 +198,42 @@ void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_vcpu *tmp;
+	cpumask_t mask;
+	unsigned long i;
+
+	/* Don't bother on old hardware */
	if (!kvm_dvmbm_support)
		return;
 
	cpumask_copy(vcpu->arch.cpus_ptr, current->cpus_ptr);
+
+	if (likely(cpumask_equal(vcpu->arch.cpus_ptr,
+				 vcpu->arch.pre_cpus_ptr)))
+		return;
+
+	/* Re-calculate dvm_cpumask for this VM */
+	spin_lock(&kvm->arch.dvm_lock);
+
+	cpumask_clear(&mask);
+	kvm_for_each_vcpu(i, tmp, kvm) {
+		/*
+		 * We may get the stale cpus_ptr if another thread
+		 * is concurrently changing its affinity. It'll
+		 * eventually go through vcpu_load() and we rely on
+		 * the last dvm_lock holder to make things correct.
+		 */
+		cpumask_or(&mask, &mask, tmp->arch.cpus_ptr);
+	}
+
+	if (cpumask_equal(kvm->arch.dvm_cpumask, &mask))
+		goto out_unlock;
+
+	cpumask_copy(kvm->arch.dvm_cpumask, &mask);
+
+out_unlock:
+	spin_unlock(&kvm->arch.dvm_lock);
 }
 
 void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu)
@@ -211,3 +243,24 @@ void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu)
 
	cpumask_copy(vcpu->arch.pre_cpus_ptr, vcpu->arch.cpus_ptr);
 }
+
+int kvm_hisi_init_dvmbm(struct kvm *kvm)
+{
+	if (!kvm_dvmbm_support)
+		return 0;
+
+	spin_lock_init(&kvm->arch.dvm_lock);
+	kvm->arch.dvm_cpumask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
+	if (!kvm->arch.dvm_cpumask)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void kvm_hisi_destroy_dvmbm(struct kvm *kvm)
+{
+	if (!kvm_dvmbm_support)
+		return;
+
+	kfree(kvm->arch.dvm_cpumask);
+}
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h
index f28f4af35a9f..0f3c4fa9c33b 100644
--- a/arch/arm64/kvm/hisilicon/hisi_virt.h
+++ b/arch/arm64/kvm/hisilicon/hisi_virt.h
@@ -27,5 +27,7 @@ int kvm_hisi_dvmbm_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu);
 void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu);
 void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu);
+int kvm_hisi_init_dvmbm(struct kvm *kvm);
+void kvm_hisi_destroy_dvmbm(struct kvm *kvm);
 
 #endif /* __HISI_VIRT_H__ */