From: Quan Zhou zhouquan65@huawei.com
virt inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N CVE: NA
----------------------------------------------------
Add a new entry ("HIP09") in oem_str[] to support detection of the new HiSi CPU type.
Signed-off-by: Quan Zhou zhouquan65@huawei.com Reviewed-by: Zenghui Yu yuzenghui@huawei.com Reviewed-by: Nianyao Tang tangnianyao@huawei.com Signed-off-by: Zheng Zengkai zhengzengkai@huawei.com
Signed-off-by: lishusen lishusen2@huawei.com --- arch/arm64/kvm/hisilicon/hisi_virt.c | 4 +++- arch/arm64/kvm/hisilicon/hisi_virt.h | 1 + 2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 9587f9508a79..90c363ed642e 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -15,6 +15,7 @@ static const char * const hisi_cpu_type_str[] = { "Hisi1612", "Hisi1616", "Hisi1620", + "HIP09", "Unknown" };
@@ -22,7 +23,8 @@ static const char * const hisi_cpu_type_str[] = { static const char * const oem_str[] = { "HIP06", /* Hisi 1612 */ "HIP07", /* Hisi 1616 */ - "HIP08" /* Hisi 1620 */ + "HIP08", /* Hisi 1620 */ + "HIP09" /* HIP09 */ };
/* diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index ef8de6a2101e..ebc462bf2a9d 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -10,6 +10,7 @@ enum hisi_cpu_type { HI_1612, HI_1616, HI_1620, + HI_IP09, UNKNOWN_HI_TYPE };
From: Quan Zhou zhouquan65@huawei.com
virt inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N CVE: NA
----------------------------------------------------
DVMBM is a virtualization extension available since HIP09, which allows TLBI executed at NS EL1 to be broadcast within a configurable range of physical CPUs (even with HCR_EL2.FB set). It brings a TLBI broadcast optimization.
Introduce the method to detect and enable this feature. Also add a kernel command-line parameter "kvm-arm.dvmbm_enabled" (=0 by default) so that users can {en,dis}able DVMBM as needed. The parameter description is added under Documentation/.
Signed-off-by: Quan Zhou zhouquan65@huawei.com Reviewed-by: Zenghui Yu yuzenghui@huawei.com Reviewed-by: Nianyao Tang tangnianyao@huawei.com Signed-off-by: Zheng Zengkai zhengzengkai@huawei.com
Signed-off-by: lishusen lishusen2@huawei.com --- .../admin-guide/kernel-parameters.txt | 3 ++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/arm.c | 5 ++ arch/arm64/kvm/hisilicon/hisi_virt.c | 49 +++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 6 +++ 5 files changed, 64 insertions(+)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index e755f76f76bd..49f4b7e41db4 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2648,6 +2648,9 @@ [KVM,ARM] Allow use of GICv4 for direct injection of LPIs.
+ kvm-arm.dvmbm_enabled= + [KVM,ARM] Allow use of HiSilicon DVMBM capability. + kvm_cma_resv_ratio=n [PPC] Reserves given percentage from system memory area for contiguous memory allocation for KVM hash pagetable diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e66e24ea5ab4..be1e1a6e3754 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1155,5 +1155,6 @@ void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu); bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
extern bool kvm_ncsnp_support; +extern bool kvm_dvmbm_support;
#endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3b3244b9f6a2..7c08597501bf 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -63,6 +63,9 @@ static bool vgic_present, kvm_arm_initialised; /* Capability of non-cacheable snooping */ bool kvm_ncsnp_support;
+/* Capability of DVMBM */ +bool kvm_dvmbm_support; + static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -2425,8 +2428,10 @@ static __init int kvm_arm_init(void) #ifdef CONFIG_KVM_HISI_VIRT probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); + kvm_dvmbm_support = hisi_dvmbm_supported(); #endif kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); + kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled");
in_hyp_mode = is_kernel_in_hyp_mode();
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 90c363ed642e..b81488cd663b 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -11,6 +11,8 @@
static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE;
+static bool dvmbm_enabled; + static const char * const hisi_cpu_type_str[] = { "Hisi1612", "Hisi1616", @@ -124,3 +126,50 @@ bool hisi_ncsnp_supported(void)
return supported; } + +static int __init early_dvmbm_enable(char *buf) +{ + return strtobool(buf, &dvmbm_enabled); +} +early_param("kvm-arm.dvmbm_enabled", early_dvmbm_enable); + +static void hardware_enable_dvmbm(void *data) +{ + u64 val; + + val = read_sysreg_s(SYS_LSUDVM_CTRL_EL2); + val |= LSUDVM_CTLR_EL2_MASK; + write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); +} + +static void hardware_disable_dvmbm(void *data) +{ + u64 val; + + val = read_sysreg_s(SYS_LSUDVM_CTRL_EL2); + val &= ~LSUDVM_CTLR_EL2_MASK; + write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); +} + +bool hisi_dvmbm_supported(void) +{ + if (cpu_type != HI_IP09) + return false; + + /* Determine whether DVMBM is supported by the hardware */ + if (!(read_sysreg(aidr_el1) & AIDR_EL1_DVMBM_MASK)) + return false; + + /* User provided kernel command-line parameter */ + if (!dvmbm_enabled || !is_kernel_in_hyp_mode()) { + on_each_cpu(hardware_disable_dvmbm, NULL, 1); + return false; + } + + /* + * Enable TLBI Broadcast optimization by setting + * LSUDVM_CTRL_EL2's bit[0]. + */ + on_each_cpu(hardware_enable_dvmbm, NULL, 1); + return true; +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index ebc462bf2a9d..95e5e889dcb1 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -14,7 +14,13 @@ enum hisi_cpu_type { UNKNOWN_HI_TYPE };
+/* HIP09 */ +#define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) +#define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) +#define LSUDVM_CTLR_EL2_MASK BIT_ULL(0) + void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); +bool hisi_dvmbm_supported(void);
#endif /* __HISI_VIRT_H__ */
From: Quan Zhou zhouquan65@huawei.com
virt inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N CVE: NA
----------------------------------------------------
We already have cpus_ptr in current thread struct now, through which we can know the pcpu range the thread is allowed to run on. So in kvm_arch_vcpu_{load,put}, we can also know the pcpu range the vcpu thread is allowed to be scheduled on, and that is the range we want to configure for TLBI broadcast.
Introduce two variables cpus_ptr and pre_cpus_ptr in struct kvm_vcpu_arch. @cpus_ptr always comes from current->cpus_ptr and @pre_cpus_ptr always comes from @cpus_ptr.
Signed-off-by: Quan Zhou zhouquan65@huawei.com Reviewed-by: Zenghui Yu yuzenghui@huawei.com Reviewed-by: Nianyao Tang tangnianyao@huawei.com Signed-off-by: Zheng Zengkai zhengzengkai@huawei.com
Signed-off-by: lishusen lishusen2@huawei.com --- arch/arm64/include/asm/kvm_host.h | 6 +++++ arch/arm64/kvm/arm.c | 18 +++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.c | 38 ++++++++++++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 5 ++++ 4 files changed, 67 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index be1e1a6e3754..9e7b2081e11e 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -591,6 +591,12 @@ struct kvm_vcpu_arch {
/* Per-vcpu CCSIDR override or NULL */ u32 *ccsidr; + +#ifdef CONFIG_KVM_HISI_VIRT + /* Copy of current->cpus_ptr */ + cpumask_t *cpus_ptr; + cpumask_t *pre_cpus_ptr; +#endif };
/* diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 7c08597501bf..6d22d5e3626c 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -402,6 +402,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (err) return err;
+#ifdef CONFIG_KVM_HISI_VIRT + err = kvm_hisi_dvmbm_vcpu_init(vcpu); + if (err) + return err; +#endif + return kvm_share_hyp(vcpu, vcpu + 1); }
@@ -419,6 +425,10 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_pmu_vcpu_destroy(vcpu);
kvm_arm_vcpu_destroy(vcpu); + +#ifdef CONFIG_KVM_HISI_VIRT + kvm_hisi_dvmbm_vcpu_destroy(vcpu); +#endif }
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) @@ -475,6 +485,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) vcpu_set_on_unsupported_cpu(vcpu); + +#ifdef CONFIG_KVM_HISI_VIRT + kvm_hisi_dvmbm_load(vcpu); +#endif }
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -490,6 +504,10 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu_clear_on_unsupported_cpu(vcpu); vcpu->cpu = -1; + +#ifdef CONFIG_KVM_HISI_VIRT + kvm_hisi_dvmbm_put(vcpu); +#endif }
static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index b81488cd663b..2c79e7f28ca5 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -173,3 +173,41 @@ bool hisi_dvmbm_supported(void) on_each_cpu(hardware_enable_dvmbm, NULL, 1); return true; } + +int kvm_hisi_dvmbm_vcpu_init(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return 0; + + vcpu->arch.cpus_ptr = kzalloc(sizeof(cpumask_t), GFP_ATOMIC); + vcpu->arch.pre_cpus_ptr = kzalloc(sizeof(cpumask_t), GFP_ATOMIC); + if (!vcpu->arch.cpus_ptr || !vcpu->arch.pre_cpus_ptr) + return -ENOMEM; + + return 0; +} + +void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + kfree(vcpu->arch.cpus_ptr); + kfree(vcpu->arch.pre_cpus_ptr); +} + +void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + cpumask_copy(vcpu->arch.cpus_ptr, current->cpus_ptr); +} + +void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + cpumask_copy(vcpu->arch.pre_cpus_ptr, vcpu->arch.cpus_ptr); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 95e5e889dcb1..3aac75651733 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -23,4 +23,9 @@ void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void);
+int kvm_hisi_dvmbm_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu); +void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu); +void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu); + #endif /* __HISI_VIRT_H__ */
From: Quan Zhou zhouquan65@huawei.com
virt inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N CVE: NA
----------------------------------------------------
Introduce dvm_cpumask and dvm_lock in struct kvm_arch. dvm_cpumask will store the union of all vcpus' cpus_ptr and will be used for the TLBI broadcast range. dvm_lock ensures exclusive manipulation of dvm_cpumask.
In vcpu_load, we should decide whether to perform the subsequent update operation by checking whether dvm_cpumask has changed.
Signed-off-by: Quan Zhou zhouquan65@huawei.com Reviewed-by: Zenghui Yu yuzenghui@huawei.com Reviewed-by: Nianyao Tang tangnianyao@huawei.com Signed-off-by: Zheng Zengkai zhengzengkai@huawei.com
Signed-off-by: lishusen lishusen2@huawei.com --- arch/arm64/include/asm/kvm_host.h | 5 +++ arch/arm64/kvm/arm.c | 13 +++++++ arch/arm64/kvm/hisilicon/hisi_virt.c | 53 ++++++++++++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 2 ++ 4 files changed, 73 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 9e7b2081e11e..327327fcd444 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -279,6 +279,11 @@ struct kvm_arch { * the associated pKVM instance in the hypervisor. */ struct kvm_protected_vm pkvm; + +#ifdef CONFIG_KVM_HISI_VIRT + spinlock_t dvm_lock; + cpumask_t *dvm_cpumask; /* Union of all vcpu's cpus_ptr */ +#endif };
struct kvm_vcpu_fault_info { diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 6d22d5e3626c..0f47921d5b1f 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -147,6 +147,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret;
+#ifdef CONFIG_KVM_HISI_VIRT + ret = kvm_hisi_init_dvmbm(kvm); + if (ret) + return ret; +#endif + mutex_init(&kvm->arch.config_lock);
#ifdef CONFIG_LOCKDEP @@ -158,6 +164,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) #endif
ret = kvm_share_hyp(kvm, kvm + 1); + if (ret) return ret;
@@ -207,6 +214,12 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) */ void kvm_arch_destroy_vm(struct kvm *kvm) { + int i; + +#ifdef CONFIG_KVM_HISI_VIRT + kvm_hisi_destroy_dvmbm(kvm); +#endif + bitmap_free(kvm->arch.pmu_filter); free_cpumask_var(kvm->arch.supported_cpus);
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 2c79e7f28ca5..18e2ddd8bf4b 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -198,10 +198,42 @@ void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu)
void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu) { + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *tmp; + cpumask_t mask; + int i; + + /* Don't bother on old hardware */ if (!kvm_dvmbm_support) return;
cpumask_copy(vcpu->arch.cpus_ptr, current->cpus_ptr); + + if (likely(cpumask_equal(vcpu->arch.cpus_ptr, + vcpu->arch.pre_cpus_ptr))) + return; + + /* Re-calculate dvm_cpumask for this VM */ + spin_lock(&kvm->arch.dvm_lock); + + cpumask_clear(&mask); + kvm_for_each_vcpu(i, tmp, kvm) { + /* + * We may get the stale cpus_ptr if another thread + * is concurrently changing its affinity. It'll + * eventually go through vcpu_load() and we rely on + * the last dvm_lock holder to make things correct. + */ + cpumask_or(&mask, &mask, tmp->arch.cpus_ptr); + } + + if (cpumask_equal(kvm->arch.dvm_cpumask, &mask)) + goto out_unlock; + + cpumask_copy(kvm->arch.dvm_cpumask, &mask); + +out_unlock: + spin_unlock(&kvm->arch.dvm_lock); }
void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu) @@ -211,3 +243,24 @@ void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu)
cpumask_copy(vcpu->arch.pre_cpus_ptr, vcpu->arch.cpus_ptr); } + +int kvm_hisi_init_dvmbm(struct kvm *kvm) +{ + if (!kvm_dvmbm_support) + return 0; + + spin_lock_init(&kvm->arch.dvm_lock); + kvm->arch.dvm_cpumask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC); + if (!kvm->arch.dvm_cpumask) + return -ENOMEM; + + return 0; +} + +void kvm_hisi_destroy_dvmbm(struct kvm *kvm) +{ + if (!kvm_dvmbm_support) + return; + + kfree(kvm->arch.dvm_cpumask); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 3aac75651733..1fd4b3295d78 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -27,5 +27,7 @@ int kvm_hisi_dvmbm_vcpu_init(struct kvm_vcpu *vcpu); void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu); void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu); void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu); +int kvm_hisi_init_dvmbm(struct kvm *kvm); +void kvm_hisi_destroy_dvmbm(struct kvm *kvm);
#endif /* __HISI_VIRT_H__ */
From: Quan Zhou zhouquan65@huawei.com
virt inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N CVE: NA
----------------------------------------------------
Implement the capability of DVMBM. Before each vcpu is loaded, we re-calculate the VM-wide dvm_cpumask, and if it's changed we will kick all other vcpus out to reload the latest LSUDVMBM value to the register, and a new request KVM_REQ_RELOAD_DVMBM is added to implement this.
Otherwise, if dvm_cpumask is not changed by this single vcpu, we simply reload the LSUDVMBM value into the register to ensure that its contents stay correct; nothing else needs to be done.
Signed-off-by: Quan Zhou zhouquan65@huawei.com Reviewed-by: Zenghui Yu yuzenghui@huawei.com Reviewed-by: Nianyao Tang tangnianyao@huawei.com Signed-off-by: Zheng Zengkai zhengzengkai@huawei.com
Signed-off-by: lishusen lishusen2@huawei.com --- arch/arm64/include/asm/kvm_host.h | 2 + arch/arm64/kvm/arm.c | 5 ++ arch/arm64/kvm/hisilicon/hisi_virt.c | 125 ++++++++++++++++++++++++++- arch/arm64/kvm/hisilicon/hisi_virt.h | 28 ++++++ 4 files changed, 159 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 327327fcd444..e9bf556f206f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -50,6 +50,7 @@ #define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5) #define KVM_REQ_SUSPEND KVM_ARCH_REQ(6) #define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7) +#define KVM_REQ_RELOAD_DVMBM KVM_ARCH_REQ(8)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ KVM_DIRTY_LOG_INITIALLY_SET) @@ -283,6 +284,7 @@ struct kvm_arch { #ifdef CONFIG_KVM_HISI_VIRT spinlock_t dvm_lock; cpumask_t *dvm_cpumask; /* Union of all vcpu's cpus_ptr */ + u64 lsudvmbm_el2; #endif };
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 0f47921d5b1f..c57a2e91a609 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -853,6 +853,11 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_dirty_ring_check_request(vcpu)) return 0; + +#ifdef CONFIG_KVM_HISI_VIRT + if (kvm_check_request(KVM_REQ_RELOAD_DVMBM, vcpu)) + kvm_hisi_reload_lsudvmbm(vcpu->kvm); +#endif }
return 1; diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 18e2ddd8bf4b..10233b801896 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -196,6 +196,97 @@ void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu) kfree(vcpu->arch.pre_cpus_ptr); }
+static void __kvm_write_lsudvmbm(struct kvm *kvm) +{ + write_sysreg_s(kvm->arch.lsudvmbm_el2, SYS_LSUDVMBM_EL2); +} + +static void kvm_write_lsudvmbm(struct kvm *kvm) +{ + /* Do we really need to hold the dvm_lock?? */ + spin_lock(&kvm->arch.dvm_lock); + __kvm_write_lsudvmbm(kvm); + spin_unlock(&kvm->arch.dvm_lock); +} + +static int kvm_dvmbm_get_dies_info(struct kvm *kvm, u64 *vm_aff3s, int size) +{ + int num = 0, cpu; + + for_each_cpu(cpu, kvm->arch.dvm_cpumask) { + bool found = false; + u64 aff3; + int i; + + if (num >= size) + break; + + aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3); + for (i = 0; i < num; i++) { + if (vm_aff3s[i] == aff3) { + found = true; + break; + } + } + + if (!found) + vm_aff3s[num++] = aff3; + } + + return num; +} + +static void kvm_update_vm_lsudvmbm(struct kvm *kvm) +{ + u64 mpidr, aff3, aff2, aff1; + u64 vm_aff3s[DVMBM_MAX_DIES]; + u64 val; + int cpu, nr_dies; + + nr_dies = kvm_dvmbm_get_dies_info(kvm, vm_aff3s, DVMBM_MAX_DIES); + if (nr_dies > 2) { + val = DVMBM_RANGE_ALL_DIES << DVMBM_RANGE_SHIFT; + goto out_update; + } + + if (nr_dies == 1) { + val = DVMBM_RANGE_ONE_DIE << DVMBM_RANGE_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_SHIFT; + + /* fulfill bits [52:0] */ + for_each_cpu(cpu, kvm->arch.dvm_cpumask) { + mpidr = cpu_logical_map(cpu); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + + val |= 1ULL << (aff2 * 4 + aff1); + } + + goto out_update; + } + + /* nr_dies == 2 */ + val = DVMBM_RANGE_TWO_DIES << DVMBM_RANGE_SHIFT | + DVMBM_GRAN_CLUSTER << DVMBM_GRAN_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_SHIFT | + vm_aff3s[1] << DVMBM_DIE2_SHIFT; + + /* and fulfill bits [43:0] */ + for_each_cpu(cpu, kvm->arch.dvm_cpumask) { + mpidr = cpu_logical_map(cpu); + aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + + if (aff3 == vm_aff3s[0]) + val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT); + else + val |= 1ULL << (aff2 + DVMBM_DIE2_CLUSTER_SHIFT); + } + +out_update: + 
kvm->arch.lsudvmbm_el2 = val; +} + void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; @@ -210,8 +301,10 @@ void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu) cpumask_copy(vcpu->arch.cpus_ptr, current->cpus_ptr);
if (likely(cpumask_equal(vcpu->arch.cpus_ptr, - vcpu->arch.pre_cpus_ptr))) + vcpu->arch.pre_cpus_ptr))) { + kvm_write_lsudvmbm(kvm); return; + }
/* Re-calculate dvm_cpumask for this VM */ spin_lock(&kvm->arch.dvm_lock); @@ -232,7 +325,21 @@ void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu)
cpumask_copy(kvm->arch.dvm_cpumask, &mask);
+ /* + * Perform a heavy invalidation for this VMID. Good place + * to optimize, right? + */ + kvm_flush_remote_tlbs(kvm); + + /* + * Re-calculate LSUDVMBM_EL2 for this VM and kick all vcpus + * out to reload the LSUDVMBM configuration. + */ + kvm_update_vm_lsudvmbm(kvm); + kvm_make_all_cpus_request(kvm, KVM_REQ_RELOAD_DVMBM); + out_unlock: + __kvm_write_lsudvmbm(kvm); spin_unlock(&kvm->arch.dvm_lock); }
@@ -242,6 +349,12 @@ void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu) return;
cpumask_copy(vcpu->arch.pre_cpus_ptr, vcpu->arch.cpus_ptr); + + /* + * We're pretty sure that host kernel runs at EL2 (as + * DVMBM is disabled in case of nVHE) and can't be affected + * by the configured SYS_LSUDVMBM_EL2. + */ }
int kvm_hisi_init_dvmbm(struct kvm *kvm) @@ -264,3 +377,13 @@ void kvm_hisi_destroy_dvmbm(struct kvm *kvm)
kfree(kvm->arch.dvm_cpumask); } + +void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) +{ + if (WARN_ON_ONCE(!kvm_dvmbm_support)) + return; + + preempt_disable(); + kvm_write_lsudvmbm(kvm); + preempt_enable(); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 1fd4b3295d78..aefed2777a9e 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -19,6 +19,33 @@ enum hisi_cpu_type { #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) #define LSUDVM_CTLR_EL2_MASK BIT_ULL(0)
+/* + * MPIDR_EL1 layout on HIP09 + * + * Aff3[7:3] - socket ID [0-15] + * Aff3[2:0] - die ID [1,3] + * Aff2 - cluster ID [0-9] + * Aff1 - core ID [0-3] + * Aff0 - thread ID [0,1] + */ + +#define SYS_LSUDVMBM_EL2 sys_reg(3, 4, 15, 7, 5) +#define DVMBM_RANGE_SHIFT 62 +#define DVMBM_RANGE_ONE_DIE 0ULL +#define DVMBM_RANGE_TWO_DIES 1ULL +#define DVMBM_RANGE_ALL_DIES 3ULL + +#define DVMBM_GRAN_SHIFT 61 +#define DVMBM_GRAN_CLUSTER 0ULL +#define DVMBM_GRAN_DIE 1ULL + +#define DVMBM_DIE1_SHIFT 53 +#define DVMBM_DIE2_SHIFT 45 +#define DVMBM_DIE1_CLUSTER_SHIFT 22 +#define DVMBM_DIE2_CLUSTER_SHIFT 0 + +#define DVMBM_MAX_DIES 32 + void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); @@ -29,5 +56,6 @@ void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu); void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu); int kvm_hisi_init_dvmbm(struct kvm *kvm); void kvm_hisi_destroy_dvmbm(struct kvm *kvm); +void kvm_hisi_reload_lsudvmbm(struct kvm *kvm);
#endif /* __HISI_VIRT_H__ */
反馈: 您发送到kernel@openeuler.org的补丁/补丁集,转换为PR失败! 邮件列表地址:https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/Z... 失败原因:补丁集缺失封面信息 建议解决方法:请提供补丁集并重新发送您的补丁集到邮件列表
Feedback: The patch(es) you sent to kernel@openeuler.org failed to be converted to a PR! Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/Z... Failure reason: the cover letter of the patch series is missing. Suggested solution: please add the cover letter to the patch series and send it to the mailing list again.