From: Quan Zhou <zhouquan65@huawei.com>
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I62Q2L
CVE: NA
----------------------------------------------------
Introduce dvm_cpumask and dvm_lock in struct kvm_arch. dvm_cpumask will store the union of all vcpus' cpus_ptr and will be used for the TLBI broadcast range. dvm_lock ensures an exclusive manipulation of dvm_cpumask.
In vcpu_load, we should decide whether to perform the subsequent update operation by checking whether the vcpu's cpus_ptr has changed (compared against the recorded pre_cpus_ptr).
Signed-off-by: Quan Zhou <zhouquan65@huawei.com>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Nianyao Tang <tangnianyao@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/arm64/include/asm/kvm_host.h    |  5 +++
 arch/arm64/kvm/arm.c                 | 10 ++++++
 arch/arm64/kvm/hisilicon/hisi_virt.c | 53 ++++++++++++++++++++++++++++
 arch/arm64/kvm/hisilicon/hisi_virt.h |  2 ++
 4 files changed, 70 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7b6e2b3fd376..164a90b53195 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -121,6 +121,11 @@ struct kvm_arch { unsigned int pmuver;
u8 pfr0_csv2; + +#ifdef CONFIG_KVM_HISI_VIRT + spinlock_t dvm_lock; + cpumask_t *dvm_cpumask; /* Union of all vcpu's cpus_ptr */ +#endif };
struct kvm_vcpu_fault_info { diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 0d8371eca686..67d88b336da2 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -143,6 +143,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret;
+#ifdef CONFIG_KVM_HISI_VIRT + ret = kvm_hisi_init_dvmbm(kvm); + if (ret) + return ret; +#endif + ret = kvm_arm_setup_stage2(kvm, type); if (ret) return ret; @@ -182,6 +188,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm) { int i;
+#ifdef CONFIG_KVM_HISI_VIRT + kvm_hisi_destroy_dvmbm(kvm); +#endif + bitmap_free(kvm->arch.pmu_filter);
kvm_vgic_destroy(kvm); diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 2c79e7f28ca5..18e2ddd8bf4b 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -198,10 +198,42 @@ void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu)
void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu) { + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *tmp; + cpumask_t mask; + int i; + + /* Don't bother on old hardware */ if (!kvm_dvmbm_support) return;
cpumask_copy(vcpu->arch.cpus_ptr, current->cpus_ptr); + + if (likely(cpumask_equal(vcpu->arch.cpus_ptr, + vcpu->arch.pre_cpus_ptr))) + return; + + /* Re-calculate dvm_cpumask for this VM */ + spin_lock(&kvm->arch.dvm_lock); + + cpumask_clear(&mask); + kvm_for_each_vcpu(i, tmp, kvm) { + /* + * We may get the stale cpus_ptr if another thread + * is concurrently changing its affinity. It'll + * eventually go through vcpu_load() and we rely on + * the last dvm_lock holder to make things correct. + */ + cpumask_or(&mask, &mask, tmp->arch.cpus_ptr); + } + + if (cpumask_equal(kvm->arch.dvm_cpumask, &mask)) + goto out_unlock; + + cpumask_copy(kvm->arch.dvm_cpumask, &mask); + +out_unlock: + spin_unlock(&kvm->arch.dvm_lock); }
void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu) @@ -211,3 +243,24 @@ void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu)
cpumask_copy(vcpu->arch.pre_cpus_ptr, vcpu->arch.cpus_ptr); } + +int kvm_hisi_init_dvmbm(struct kvm *kvm) +{ + if (!kvm_dvmbm_support) + return 0; + + spin_lock_init(&kvm->arch.dvm_lock); + kvm->arch.dvm_cpumask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC); + if (!kvm->arch.dvm_cpumask) + return -ENOMEM; + + return 0; +} + +void kvm_hisi_destroy_dvmbm(struct kvm *kvm) +{ + if (!kvm_dvmbm_support) + return; + + kfree(kvm->arch.dvm_cpumask); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 3aac75651733..1fd4b3295d78 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -27,5 +27,7 @@ int kvm_hisi_dvmbm_vcpu_init(struct kvm_vcpu *vcpu); void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu); void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu); void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu); +int kvm_hisi_init_dvmbm(struct kvm *kvm); +void kvm_hisi_destroy_dvmbm(struct kvm *kvm);
#endif /* __HISI_VIRT_H__ */