Replace cpumask_t* by cpumask_var_t and rename related variables
Xiang Chen (1):
  KVM: arm64: Translate logic cluster id to physical cluster id when updating lsudvmbm

Yanan Wang (1):
  KVM: arm64: Only probe Hisi ncsnp feature on Hisi CPUs

Zenghui Yu (2):
  KVM: arm64: Probe Hisi CPU TYPE from ACPI/DTB
  KVM: arm64: Add support for probing Hisi ncsnp capability

lishusen (5):
  KVM: arm64: Support a new HiSi CPU type
  KVM: arm64: Probe and configure DVMBM capability on HiSi CPUs
  KVM: arm64: Add kvm_vcpu_arch::sched_cpus and pre_sched_cpus
  KVM: arm64: Add kvm_arch::sched_cpus and sched_lock
  KVM: arm64: Implement the capability of DVMBM
 .../admin-guide/kernel-parameters.txt   |   4 +
 arch/arm64/configs/openeuler_defconfig  |   1 +
 arch/arm64/include/asm/hisi_cpu_model.h |  21 +
 arch/arm64/include/asm/kvm_host.h       |  30 +-
 arch/arm64/include/asm/kvm_mmu.h        |   2 +-
 arch/arm64/kernel/image-vars.h          |   5 +
 arch/arm64/kvm/Kconfig                  |   1 +
 arch/arm64/kvm/Makefile                 |   1 +
 arch/arm64/kvm/arm.c                    |  36 ++
 arch/arm64/kvm/hisilicon/Kconfig        |   7 +
 arch/arm64/kvm/hisilicon/Makefile       |   2 +
 arch/arm64/kvm/hisilicon/hisi_virt.c    | 515 ++++++++++++++++++
 arch/arm64/kvm/hisilicon/hisi_virt.h    | 103 ++++
 arch/arm64/kvm/hyp/pgtable.c            |   2 +-
 14 files changed, 721 insertions(+), 9 deletions(-)
 create mode 100644 arch/arm64/include/asm/hisi_cpu_model.h
 create mode 100644 arch/arm64/kvm/hisilicon/Kconfig
 create mode 100644 arch/arm64/kvm/hisilicon/Makefile
 create mode 100644 arch/arm64/kvm/hisilicon/hisi_virt.c
 create mode 100644 arch/arm64/kvm/hisilicon/hisi_virt.h
From: Zenghui Yu <yuzenghui@huawei.com>
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
----------------------------------------------------
Parse ACPI/DTB to detect which HiSilicon CPU model the hypervisor is running on.
Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/include/asm/hisi_cpu_model.h | 19 ++++++
 arch/arm64/include/asm/kvm_host.h       |  1 +
 arch/arm64/kvm/Makefile                 |  1 +
 arch/arm64/kvm/arm.c                    |  6 ++
 arch/arm64/kvm/hisi_cpu_model.c         | 83 +++++++++++++++++++++++++
 5 files changed, 110 insertions(+)
 create mode 100644 arch/arm64/include/asm/hisi_cpu_model.h
 create mode 100644 arch/arm64/kvm/hisi_cpu_model.c
diff --git a/arch/arm64/include/asm/hisi_cpu_model.h b/arch/arm64/include/asm/hisi_cpu_model.h new file mode 100644 index 000000000000..003a3a53cf33 --- /dev/null +++ b/arch/arm64/include/asm/hisi_cpu_model.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + */ + +#ifndef __HISI_CPU_MODEL_H__ +#define __HISI_CPU_MODEL_H__ + +enum hisi_cpu_type { + HI_1612, + HI_1616, + HI_1620, + UNKNOWN_HI_TYPE +}; + +extern enum hisi_cpu_type hi_cpu_type; + +void probe_hisi_cpu_type(void); +#endif /* __HISI_CPU_MODEL_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index af06ccb7ee34..5154085849fa 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -27,6 +27,7 @@ #include <asm/fpsimd.h> #include <asm/kvm.h> #include <asm/kvm_asm.h> +#include <asm/hisi_cpu_model.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index c0c050e53157..86c6ded87eeb 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -15,6 +15,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \ guest.o debug.o reset.o sys_regs.o stacktrace.o \ vgic-sys-reg-v3.o fpsimd.o pkvm.o \ arch_timer.o trng.o vmid.o emulate-nested.o nested.o \ + hisi_cpu_model.o \ vgic/vgic.o vgic/vgic-init.o \ vgic/vgic-irqfd.o vgic/vgic-v2.o \ vgic/vgic-v3.o vgic/vgic-v4.o \ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 4866b3f7b4ea..9d57a57283b3 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -56,6 +56,9 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
static bool vgic_present, kvm_arm_initialised;
+/* Hisi cpu type enum */ +enum hisi_cpu_type hi_cpu_type = UNKNOWN_HI_TYPE; + static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -2415,6 +2418,9 @@ static __init int kvm_arm_init(void) return err; }
+ /* Probe the Hisi CPU type */ + probe_hisi_cpu_type(); + in_hyp_mode = is_kernel_in_hyp_mode();
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || diff --git a/arch/arm64/kvm/hisi_cpu_model.c b/arch/arm64/kvm/hisi_cpu_model.c new file mode 100644 index 000000000000..4d5a099bc27a --- /dev/null +++ b/arch/arm64/kvm/hisi_cpu_model.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + */ + +#include <linux/acpi.h> +#include <linux/of.h> +#include <linux/init.h> +#include <linux/kvm_host.h> + +#ifdef CONFIG_ACPI + +/* ACPI Hisi oem table id str */ +const char *oem_str[] = { + "HIP06", /* Hisi 1612 */ + "HIP07", /* Hisi 1616 */ + "HIP08" /* Hisi 1620 */ +}; + +/* + * Get Hisi oem table id. + */ +static void acpi_get_hw_cpu_type(void) +{ + struct acpi_table_header *table; + acpi_status status; + int i, str_size = ARRAY_SIZE(oem_str); + + /* Get oem table id from ACPI table header */ + status = acpi_get_table(ACPI_SIG_DSDT, 0, &table); + if (ACPI_FAILURE(status)) { + pr_err("Failed to get ACPI table: %s\n", + acpi_format_exception(status)); + return; + } + + for (i = 0; i < str_size; ++i) { + if (!strncmp(oem_str[i], table->oem_table_id, 5)) { + hi_cpu_type = i; + return; + } + } +} + +#else +static void acpi_get_hw_cpu_type(void) {} +#endif + +/* of Hisi cpu model str */ +const char *of_model_str[] = { + "Hi1612", + "Hi1616" +}; + +static void of_get_hw_cpu_type(void) +{ + const char *cpu_type; + int ret, i, str_size = ARRAY_SIZE(of_model_str); + + ret = of_property_read_string(of_root, "model", &cpu_type); + if (ret < 0) { + pr_err("Failed to get Hisi cpu model by OF.\n"); + return; + } + + for (i = 0; i < str_size; ++i) { + if (strstr(cpu_type, of_model_str[i])) { + hi_cpu_type = i; + return; + } + } +} + +void probe_hisi_cpu_type(void) +{ + if (!acpi_disabled) + acpi_get_hw_cpu_type(); + else + of_get_hw_cpu_type(); + + if (hi_cpu_type == UNKNOWN_HI_TYPE) + pr_warn("UNKNOWN Hisi cpu type.\n"); +}
From: Zenghui Yu <yuzenghui@huawei.com>
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
----------------------------------------------------
Kunpeng 920 offers the HHA ncsnp capability, with which the hypervisor doesn't need to perform as much cache maintenance as before (in case the guest has some non-cacheable Stage-1 mappings). Currently we apply this hardware capability when:
- vCPU switching MMU+caches on/off
- creating Stage-2 mappings for Daborts
Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/include/asm/hisi_cpu_model.h |  2 ++
 arch/arm64/include/asm/kvm_mmu.h        |  2 +-
 arch/arm64/kvm/arm.c                    |  2 ++
 arch/arm64/kvm/hisi_cpu_model.c         | 34 +++++++++++++++++++++++++
 arch/arm64/kvm/hyp/pgtable.c            |  2 +-
 5 files changed, 40 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/hisi_cpu_model.h b/arch/arm64/include/asm/hisi_cpu_model.h index 003a3a53cf33..67008d17416e 100644 --- a/arch/arm64/include/asm/hisi_cpu_model.h +++ b/arch/arm64/include/asm/hisi_cpu_model.h @@ -14,6 +14,8 @@ enum hisi_cpu_type { };
extern enum hisi_cpu_type hi_cpu_type; +extern bool kvm_ncsnp_support;
void probe_hisi_cpu_type(void); +void probe_hisi_ncsnp_support(void); #endif /* __HISI_CPU_MODEL_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 96a80e8f6226..d698ce35deb8 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -218,7 +218,7 @@ static inline void __clean_dcache_guest_page(void *va, size_t size) * faulting in pages. Furthermore, FWB implies IDC, so cleaning to * PoU is not required either in this case. */ - if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + if (kvm_ncsnp_support || cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) return;
kvm_flush_dcache_to_poc(va, size); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 9d57a57283b3..d6d379aeda2e 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -58,6 +58,7 @@ static bool vgic_present, kvm_arm_initialised;
/* Hisi cpu type enum */ enum hisi_cpu_type hi_cpu_type = UNKNOWN_HI_TYPE; +bool kvm_ncsnp_support;
static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -2420,6 +2421,7 @@ static __init int kvm_arm_init(void)
/* Probe the Hisi CPU type */ probe_hisi_cpu_type(); + probe_hisi_ncsnp_support();
in_hyp_mode = is_kernel_in_hyp_mode();
diff --git a/arch/arm64/kvm/hisi_cpu_model.c b/arch/arm64/kvm/hisi_cpu_model.c index 4d5a099bc27a..52eecf1ba1cf 100644 --- a/arch/arm64/kvm/hisi_cpu_model.c +++ b/arch/arm64/kvm/hisi_cpu_model.c @@ -81,3 +81,37 @@ void probe_hisi_cpu_type(void) if (hi_cpu_type == UNKNOWN_HI_TYPE) pr_warn("UNKNOWN Hisi cpu type.\n"); } + +#define NCSNP_MMIO_BASE 0x20107E238 + +/* + * We have the fantastic HHA ncsnp capability on Kunpeng 920, + * with which hypervisor doesn't need to perform a lot of cache + * maintenance like before (in case the guest has non-cacheable + * Stage-1 mappings). + */ +void probe_hisi_ncsnp_support(void) +{ + void __iomem *base; + unsigned int high; + + kvm_ncsnp_support = false; + + if (hi_cpu_type != HI_1620) + goto out; + + base = ioremap(NCSNP_MMIO_BASE, 4); + if (!base) { + pr_err("Unable to map MMIO region when probing ncsnp!\n"); + goto out; + } + + high = readl_relaxed(base) >> 28; + iounmap(base); + if (high != 0x1) + kvm_ncsnp_support = true; + +out: + kvm_info("Hisi ncsnp: %s\n", kvm_ncsnp_support ? "enabled" : + "disabled"); +} diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index f155b8c9e98c..1ba101ba9392 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -1342,7 +1342,7 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) .arg = pgt, };
- if (stage2_has_fwb(pgt)) + if (kvm_ncsnp_support || stage2_has_fwb(pgt)) return 0;
return kvm_pgtable_walk(pgt, addr, size, &walker);
From: Yanan Wang <wangyanan55@huawei.com>
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
----------------------------------------------------
The "ncsnp" is an implementation specific CPU virtualization feature on Hisi 1620 series CPUs. This feature works just like ARM standard S2FWB to reduce some cache management operations in virtualization. Given that it's Hisi specific feature, let's restrict the detection only to Hisi CPUs. To realize this: 1) Add a sub-directory `hisilicon/` within arch/arm64/kvm to hold code for Hisi specific virtualization features. 2) Add a new kconfig option `CONFIG_KVM_HISI_VIRT` for users to select the whole Hisi specific virtualization features. 3) Add a generic global KVM variable `kvm_ncsnp_support` which is `false` by default. Only re-initialize it when we have `CONFIG_KVM_HISI_VIRT` enabled.
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/configs/openeuler_defconfig |   1 +
 arch/arm64/include/asm/kvm_host.h      |   3 +-
 arch/arm64/kernel/image-vars.h         |   5 +
 arch/arm64/kvm/Kconfig                 |   1 +
 arch/arm64/kvm/Makefile                |   2 +-
 arch/arm64/kvm/arm.c                   |  13 ++-
 arch/arm64/kvm/hisi_cpu_model.c        | 117 -----------------------
 arch/arm64/kvm/hisilicon/Kconfig       |   7 ++
 arch/arm64/kvm/hisilicon/Makefile      |   2 +
 arch/arm64/kvm/hisilicon/hisi_virt.c   | 124 +++++++++++++++++++++++++
 arch/arm64/kvm/hisilicon/hisi_virt.h   |  19 ++++
 11 files changed, 171 insertions(+), 123 deletions(-)
 delete mode 100644 arch/arm64/kvm/hisi_cpu_model.c
 create mode 100644 arch/arm64/kvm/hisilicon/Kconfig
 create mode 100644 arch/arm64/kvm/hisilicon/Makefile
 create mode 100644 arch/arm64/kvm/hisilicon/hisi_virt.c
 create mode 100644 arch/arm64/kvm/hisilicon/hisi_virt.h
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index cc174174c745..eb65231cc67c 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -714,6 +714,7 @@ CONFIG_KVM_VFIO=y CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y CONFIG_HAVE_KVM_IRQ_BYPASS=y CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_KVM_HISI_VIRT=y CONFIG_KVM_XFER_TO_GUEST_WORK=y CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_VIRTUALIZATION=y diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5154085849fa..e66e24ea5ab4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -27,7 +27,6 @@ #include <asm/fpsimd.h> #include <asm/kvm.h> #include <asm/kvm_asm.h> -#include <asm/hisi_cpu_model.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -1155,4 +1154,6 @@ static inline void kvm_hyp_reserve(void) { } void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu); bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
+extern bool kvm_ncsnp_support; + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 35f3c7959513..68380ea092ae 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -106,6 +106,11 @@ KVM_NVHE_ALIAS(__hyp_rodata_end); /* pKVM static key */ KVM_NVHE_ALIAS(kvm_protected_mode_initialized);
+#ifdef CONFIG_KVM_HISI_VIRT +/* Capability of non-cacheable snooping */ +KVM_NVHE_ALIAS(kvm_ncsnp_support); +#endif + #endif /* CONFIG_KVM */
#ifdef CONFIG_EFI_ZBOOT diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 83c1e09be42e..8350d43f56d4 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -5,6 +5,7 @@
source "virt/lib/Kconfig" source "virt/kvm/Kconfig" +source "arch/arm64/kvm/hisilicon/Kconfig"
menuconfig VIRTUALIZATION bool "Virtualization" diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 86c6ded87eeb..826a05d072d7 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -15,7 +15,6 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \ guest.o debug.o reset.o sys_regs.o stacktrace.o \ vgic-sys-reg-v3.o fpsimd.o pkvm.o \ arch_timer.o trng.o vmid.o emulate-nested.o nested.o \ - hisi_cpu_model.o \ vgic/vgic.o vgic/vgic-init.o \ vgic/vgic-irqfd.o vgic/vgic-v2.o \ vgic/vgic-v3.o vgic/vgic-v4.o \ @@ -24,6 +23,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \ vgic/vgic-its.o vgic/vgic-debug.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o +obj-$(CONFIG_KVM_HISI_VIRT) += hisilicon/
always-y := hyp_constants.h hyp-constants.s
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index d6d379aeda2e..3b3244b9f6a2 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -47,6 +47,10 @@
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
+#ifdef CONFIG_KVM_HISI_VIRT +#include "hisilicon/hisi_virt.h" +#endif + DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); @@ -56,8 +60,7 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
static bool vgic_present, kvm_arm_initialised;
-/* Hisi cpu type enum */ -enum hisi_cpu_type hi_cpu_type = UNKNOWN_HI_TYPE; +/* Capability of non-cacheable snooping */ bool kvm_ncsnp_support;
static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); @@ -2419,9 +2422,11 @@ static __init int kvm_arm_init(void) return err; }
- /* Probe the Hisi CPU type */ +#ifdef CONFIG_KVM_HISI_VIRT probe_hisi_cpu_type(); - probe_hisi_ncsnp_support(); + kvm_ncsnp_support = hisi_ncsnp_supported(); +#endif + kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled");
in_hyp_mode = is_kernel_in_hyp_mode();
diff --git a/arch/arm64/kvm/hisi_cpu_model.c b/arch/arm64/kvm/hisi_cpu_model.c deleted file mode 100644 index 52eecf1ba1cf..000000000000 --- a/arch/arm64/kvm/hisi_cpu_model.c +++ /dev/null @@ -1,117 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright(c) 2019 Huawei Technologies Co., Ltd - */ - -#include <linux/acpi.h> -#include <linux/of.h> -#include <linux/init.h> -#include <linux/kvm_host.h> - -#ifdef CONFIG_ACPI - -/* ACPI Hisi oem table id str */ -const char *oem_str[] = { - "HIP06", /* Hisi 1612 */ - "HIP07", /* Hisi 1616 */ - "HIP08" /* Hisi 1620 */ -}; - -/* - * Get Hisi oem table id. - */ -static void acpi_get_hw_cpu_type(void) -{ - struct acpi_table_header *table; - acpi_status status; - int i, str_size = ARRAY_SIZE(oem_str); - - /* Get oem table id from ACPI table header */ - status = acpi_get_table(ACPI_SIG_DSDT, 0, &table); - if (ACPI_FAILURE(status)) { - pr_err("Failed to get ACPI table: %s\n", - acpi_format_exception(status)); - return; - } - - for (i = 0; i < str_size; ++i) { - if (!strncmp(oem_str[i], table->oem_table_id, 5)) { - hi_cpu_type = i; - return; - } - } -} - -#else -static void acpi_get_hw_cpu_type(void) {} -#endif - -/* of Hisi cpu model str */ -const char *of_model_str[] = { - "Hi1612", - "Hi1616" -}; - -static void of_get_hw_cpu_type(void) -{ - const char *cpu_type; - int ret, i, str_size = ARRAY_SIZE(of_model_str); - - ret = of_property_read_string(of_root, "model", &cpu_type); - if (ret < 0) { - pr_err("Failed to get Hisi cpu model by OF.\n"); - return; - } - - for (i = 0; i < str_size; ++i) { - if (strstr(cpu_type, of_model_str[i])) { - hi_cpu_type = i; - return; - } - } -} - -void probe_hisi_cpu_type(void) -{ - if (!acpi_disabled) - acpi_get_hw_cpu_type(); - else - of_get_hw_cpu_type(); - - if (hi_cpu_type == UNKNOWN_HI_TYPE) - pr_warn("UNKNOWN Hisi cpu type.\n"); -} - -#define NCSNP_MMIO_BASE 0x20107E238 - -/* - * We have the fantastic HHA ncsnp capability on Kunpeng 920, - * with which hypervisor doesn't need to perform a lot of cache - * maintenance like before (in case the guest has non-cacheable - * Stage-1 mappings). - */ -void probe_hisi_ncsnp_support(void) -{ - void __iomem *base; - unsigned int high; - - kvm_ncsnp_support = false; - - if (hi_cpu_type != HI_1620) - goto out; - - base = ioremap(NCSNP_MMIO_BASE, 4); - if (!base) { - pr_err("Unable to map MMIO region when probing ncsnp!\n"); - goto out; - } - - high = readl_relaxed(base) >> 28; - iounmap(base); - if (high != 0x1) - kvm_ncsnp_support = true; - -out: - kvm_info("Hisi ncsnp: %s\n", kvm_ncsnp_support ? "enabled" : - "disabled"); -} diff --git a/arch/arm64/kvm/hisilicon/Kconfig b/arch/arm64/kvm/hisilicon/Kconfig new file mode 100644 index 000000000000..6536f897a32e --- /dev/null +++ b/arch/arm64/kvm/hisilicon/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +config KVM_HISI_VIRT + bool "HiSilicon SoC specific virtualization features" + depends on ARCH_HISI + help + Support for HiSilicon SoC specific virtualization features. + On non-HiSilicon platforms, say N here. 
diff --git a/arch/arm64/kvm/hisilicon/Makefile b/arch/arm64/kvm/hisilicon/Makefile new file mode 100644 index 000000000000..849f99d1526d --- /dev/null +++ b/arch/arm64/kvm/hisilicon/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_KVM_HISI_VIRT) += hisi_virt.o diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c new file mode 100644 index 000000000000..9587f9508a79 --- /dev/null +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright(c) 2022 Huawei Technologies Co., Ltd + */ + +#include <linux/acpi.h> +#include <linux/of.h> +#include <linux/init.h> +#include <linux/kvm_host.h> +#include "hisi_virt.h" + +static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; + +static const char * const hisi_cpu_type_str[] = { + "Hisi1612", + "Hisi1616", + "Hisi1620", + "Unknown" +}; + +/* ACPI Hisi oem table id str */ +static const char * const oem_str[] = { + "HIP06", /* Hisi 1612 */ + "HIP07", /* Hisi 1616 */ + "HIP08" /* Hisi 1620 */ +}; + +/* + * Probe Hisi CPU type form ACPI. + */ +static enum hisi_cpu_type acpi_get_hisi_cpu_type(void) +{ + struct acpi_table_header *table; + acpi_status status; + int i, str_size = ARRAY_SIZE(oem_str); + + /* Get oem table id from ACPI table header */ + status = acpi_get_table(ACPI_SIG_DSDT, 0, &table); + if (ACPI_FAILURE(status)) { + pr_warn("Failed to get ACPI table: %s\n", + acpi_format_exception(status)); + return UNKNOWN_HI_TYPE; + } + + for (i = 0; i < str_size; ++i) { + if (!strncmp(oem_str[i], table->oem_table_id, 5)) + return i; + } + + return UNKNOWN_HI_TYPE; +} + +/* of Hisi cpu model str */ +static const char * const of_model_str[] = { + "Hi1612", + "Hi1616" +}; + +/* + * Probe Hisi CPU type from DT. + */ +static enum hisi_cpu_type of_get_hisi_cpu_type(void) +{ + const char *model; + int ret, i, str_size = ARRAY_SIZE(of_model_str); + + /* + * Note: There may not be a "model" node in FDT, which + * is provided by the vendor. In this case, we are not + * able to get CPU type information through this way. + */ + ret = of_property_read_string(of_root, "model", &model); + if (ret < 0) { + pr_warn("Failed to get Hisi cpu model by OF.\n"); + return UNKNOWN_HI_TYPE; + } + + for (i = 0; i < str_size; ++i) { + if (strstr(model, of_model_str[i])) + return i; + } + + return UNKNOWN_HI_TYPE; +} + +void probe_hisi_cpu_type(void) +{ + if (!acpi_disabled) + cpu_type = acpi_get_hisi_cpu_type(); + else + cpu_type = of_get_hisi_cpu_type(); + + kvm_info("detected: Hisi CPU type '%s'\n", hisi_cpu_type_str[cpu_type]); +} + +/* + * We have the fantastic HHA ncsnp capability on Kunpeng 920, + * with which hypervisor doesn't need to perform a lot of cache + * maintenance like before (in case the guest has non-cacheable + * Stage-1 mappings). 
+ */ +#define NCSNP_MMIO_BASE 0x20107E238 +bool hisi_ncsnp_supported(void) +{ + void __iomem *base; + unsigned int high; + bool supported = false; + + if (cpu_type != HI_1620) + return supported; + + base = ioremap(NCSNP_MMIO_BASE, 4); + if (!base) { + pr_warn("Unable to map MMIO region when probing ncsnp!\n"); + return supported; + } + + high = readl_relaxed(base) >> 28; + iounmap(base); + if (high != 0x1) + supported = true; + + return supported; +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h new file mode 100644 index 000000000000..c4b5acc93fec --- /dev/null +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright(c) 2022 Huawei Technologies Co., Ltd + */ + +#ifndef __HISI_VIRT_H__ +#define __HISI_VIRT_H__ + +enum hisi_cpu_type { + HI_1612, + HI_1616, + HI_1620, + UNKNOWN_HI_TYPE +}; + +void probe_hisi_cpu_type(void); +bool hisi_ncsnp_supported(void); + +#endif /* __HISI_VIRT_H__ */
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
----------------------------------------------------
Add a new entry ("HIP09") in oem_str[] to support detection of the new HiSi CPU type.
Signed-off-by: Quan Zhou <zhouquan65@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/kvm/hisilicon/hisi_virt.c | 4 +++-
 arch/arm64/kvm/hisilicon/hisi_virt.h | 1 +
 2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 9587f9508a79..90c363ed642e 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -15,6 +15,7 @@ static const char * const hisi_cpu_type_str[] = { "Hisi1612", "Hisi1616", "Hisi1620", + "HIP09", "Unknown" };
@@ -22,7 +23,8 @@ static const char * const hisi_cpu_type_str[] = { static const char * const oem_str[] = { "HIP06", /* Hisi 1612 */ "HIP07", /* Hisi 1616 */ - "HIP08" /* Hisi 1620 */ + "HIP08", /* Hisi 1620 */ + "HIP09" /* HIP09 */ };
/* diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index c4b5acc93fec..9231b1dca7f2 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -10,6 +10,7 @@ enum hisi_cpu_type { HI_1612, HI_1616, HI_1620, + HI_IP09, UNKNOWN_HI_TYPE };
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
----------------------------------------------------
DVMBM is a virtualization extension introduced since HIP09, which allows TLBIs executed at NS EL1 to be broadcast to a configurable range of physical CPUs (even with HCR_EL2.FB set). This brings a TLBI broadcast optimization.
Introduce the method to detect and enable this feature. Also add a kernel command-line parameter "kvm-arm.dvmbm_enabled" (=0 by default) so that users can enable or disable DVMBM as needed. The parameter description is added under Documentation/.
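As a usage note (not part of the patch itself): even with CONFIG_KVM_HISI_VIRT built in, DVMBM stays disabled unless the parameter is passed at boot, e.g. by appending the following to the kernel command line:

  kvm-arm.dvmbm_enabled=1

The probe below will still refuse to enable it on non-HIP09 parts, when the AIDR_EL1 capability bits are clear, or when KVM is not running in VHE mode.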
Signed-off-by: Quan Zhou <zhouquan65@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 .../admin-guide/kernel-parameters.txt |  4 ++
 arch/arm64/include/asm/kvm_host.h     |  1 +
 arch/arm64/kvm/arm.c                  |  5 ++
 arch/arm64/kvm/hisilicon/hisi_virt.c  | 49 +++++++++++++++++++
 arch/arm64/kvm/hisilicon/hisi_virt.h  |  6 +++
 5 files changed, 65 insertions(+)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index e755f76f76bd..8444c392f338 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2648,6 +2648,10 @@ [KVM,ARM] Allow use of GICv4 for direct injection of LPIs.
+ kvm-arm.dvmbm_enabled= + [KVM,ARM] Allow use of HiSilicon DVMBM capability. + Default: 0 + kvm_cma_resv_ratio=n [PPC] Reserves given percentage from system memory area for contiguous memory allocation for KVM hash pagetable diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e66e24ea5ab4..be1e1a6e3754 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1155,5 +1155,6 @@ void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu); bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
extern bool kvm_ncsnp_support; +extern bool kvm_dvmbm_support;
#endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3b3244b9f6a2..7c08597501bf 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -63,6 +63,9 @@ static bool vgic_present, kvm_arm_initialised; /* Capability of non-cacheable snooping */ bool kvm_ncsnp_support;
+/* Capability of DVMBM */ +bool kvm_dvmbm_support; + static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -2425,8 +2428,10 @@ static __init int kvm_arm_init(void) #ifdef CONFIG_KVM_HISI_VIRT probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); + kvm_dvmbm_support = hisi_dvmbm_supported(); #endif kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); + kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled");
in_hyp_mode = is_kernel_in_hyp_mode();
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 90c363ed642e..b81488cd663b 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -11,6 +11,8 @@
static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE;
+static bool dvmbm_enabled; + static const char * const hisi_cpu_type_str[] = { "Hisi1612", "Hisi1616", @@ -124,3 +126,50 @@ bool hisi_ncsnp_supported(void)
return supported; } + +static int __init early_dvmbm_enable(char *buf) +{ + return strtobool(buf, &dvmbm_enabled); +} +early_param("kvm-arm.dvmbm_enabled", early_dvmbm_enable); + +static void hardware_enable_dvmbm(void *data) +{ + u64 val; + + val = read_sysreg_s(SYS_LSUDVM_CTRL_EL2); + val |= LSUDVM_CTLR_EL2_MASK; + write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); +} + +static void hardware_disable_dvmbm(void *data) +{ + u64 val; + + val = read_sysreg_s(SYS_LSUDVM_CTRL_EL2); + val &= ~LSUDVM_CTLR_EL2_MASK; + write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); +} + +bool hisi_dvmbm_supported(void) +{ + if (cpu_type != HI_IP09) + return false; + + /* Determine whether DVMBM is supported by the hardware */ + if (!(read_sysreg(aidr_el1) & AIDR_EL1_DVMBM_MASK)) + return false; + + /* User provided kernel command-line parameter */ + if (!dvmbm_enabled || !is_kernel_in_hyp_mode()) { + on_each_cpu(hardware_disable_dvmbm, NULL, 1); + return false; + } + + /* + * Enable TLBI Broadcast optimization by setting + * LSUDVM_CTRL_EL2's bit[0]. + */ + on_each_cpu(hardware_enable_dvmbm, NULL, 1); + return true; +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 9231b1dca7f2..f505d44e386f 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -14,7 +14,13 @@ enum hisi_cpu_type { UNKNOWN_HI_TYPE };
+/* HIP09 */ +#define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) +#define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) +#define LSUDVM_CTLR_EL2_MASK BIT_ULL(0) + void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); +bool hisi_dvmbm_supported(void);
#endif /* __HISI_VIRT_H__ */
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
----------------------------------------------------
The current thread struct already provides cpus_ptr, through which we know the range of pCPUs the thread is allowed to run on. So in kvm_arch_vcpu_{load,put} we also know the pCPU range the vCPU thread is allowed to be scheduled on, and that is the range we want to configure for TLBI broadcast.
Introduce two variables sched_cpus and pre_sched_cpus in struct kvm_vcpu_arch. @sched_cpus always comes from current->cpus_ptr and @pre_sched_cpus always comes from @sched_cpus.
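A minimal sketch of the resulting lifecycle (illustrative only, not the patch itself; the example_* helpers are hypothetical and the kvm_dvmbm_support check is omitted):

  static void example_vcpu_load(struct kvm_vcpu *vcpu)
  {
          /* vcpu_load: snapshot the vCPU thread's current affinity */
          cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr);
  }

  static void example_vcpu_put(struct kvm_vcpu *vcpu)
  {
          /* vcpu_put: remember it so the next load can detect a change */
          cpumask_copy(vcpu->arch.pre_sched_cpus, vcpu->arch.sched_cpus);
  }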
Signed-off-by: Quan Zhou <zhouquan65@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/include/asm/kvm_host.h    |  6 +++++
 arch/arm64/kvm/arm.c                 | 14 ++++++++---
 arch/arm64/kvm/hisilicon/hisi_virt.c | 37 ++++++++++++++++++++++++++++
 arch/arm64/kvm/hisilicon/hisi_virt.h | 25 +++++++++++++++++++
 4 files changed, 78 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index be1e1a6e3754..f108ebe4c2a0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -591,6 +591,12 @@ struct kvm_vcpu_arch {
/* Per-vcpu CCSIDR override or NULL */ u32 *ccsidr; + +#ifdef CONFIG_KVM_HISI_VIRT + /* pCPUs this vCPU can be scheduled on. Pure copy of current->cpus_ptr */ + cpumask_var_t sched_cpus; + cpumask_var_t pre_sched_cpus; +#endif };
/* diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 7c08597501bf..68772a732849 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -47,9 +47,7 @@
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
-#ifdef CONFIG_KVM_HISI_VIRT #include "hisilicon/hisi_virt.h" -#endif
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
@@ -402,6 +400,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (err) return err;
+ err = kvm_sched_affinity_vcpu_init(vcpu); + if (err) + return err; + return kvm_share_hyp(vcpu, vcpu + 1); }
@@ -419,6 +421,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_pmu_vcpu_destroy(vcpu);
kvm_arm_vcpu_destroy(vcpu); + + kvm_sched_affinity_vcpu_destroy(vcpu); }
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) @@ -475,6 +479,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) vcpu_set_on_unsupported_cpu(vcpu); + + kvm_tlbi_dvmbm_vcpu_load(vcpu); }
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -490,6 +496,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu_clear_on_unsupported_cpu(vcpu); vcpu->cpu = -1; + + kvm_tlbi_dvmbm_vcpu_put(vcpu); }
static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu) @@ -2425,11 +2433,9 @@ static __init int kvm_arm_init(void) return err; }
-#ifdef CONFIG_KVM_HISI_VIRT probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); kvm_dvmbm_support = hisi_dvmbm_supported(); -#endif kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled");
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index b81488cd663b..ac12fc54a6b4 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -173,3 +173,40 @@ bool hisi_dvmbm_supported(void) on_each_cpu(hardware_enable_dvmbm, NULL, 1); return true; } + +int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return 0; + + if (!zalloc_cpumask_var(&vcpu->arch.sched_cpus, GFP_ATOMIC) || + !zalloc_cpumask_var(&vcpu->arch.pre_sched_cpus, GFP_ATOMIC)) + return -ENOMEM; + + return 0; +} + +void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + free_cpumask_var(vcpu->arch.sched_cpus); + free_cpumask_var(vcpu->arch.pre_sched_cpus); +} + +void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr); +} + +void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + cpumask_copy(vcpu->arch.pre_sched_cpus, vcpu->arch.sched_cpus); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index f505d44e386f..8d8ef6aa165a 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -6,6 +6,7 @@ #ifndef __HISI_VIRT_H__ #define __HISI_VIRT_H__
+#ifdef CONFIG_KVM_HISI_VIRT enum hisi_cpu_type { HI_1612, HI_1616, @@ -23,4 +24,28 @@ void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void);
+int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); +void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu); +void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu); +#else +static inline void probe_hisi_cpu_type(void) {} +static inline bool hisi_ncsnp_supported(void) +{ + return false; +} +static inline bool hisi_dvmbm_supported(void) +{ + return false; +} + +static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) +{ + return 0; +} +static inline void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) {} +static inline void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) {} +static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} +#endif /* CONFIG_KVM_HISI_VIRT */ + #endif /* __HISI_VIRT_H__ */
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
----------------------------------------------------
Introduce sched_cpus and sched_lock in struct kvm_arch. sched_cpus will store the union of all vcpus' cpus_ptr in a VM and will be used as the TLBI broadcast range for this VM. sched_lock ensures exclusive manipulation of sched_cpus.
In vcpu_load, we decide whether to perform the subsequent update by checking whether sched_cpus has changed, as shown in the sketch below.
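A sketch of the resulting vcpu_load path (illustrative only; the example_* name is hypothetical, the kvm_dvmbm_support check is omitted, and the real code below additionally skips the copy when the union is unchanged):

  static void example_dvmbm_vcpu_load(struct kvm_vcpu *vcpu)
  {
          struct kvm *kvm = vcpu->kvm;
          struct kvm_vcpu *tmp;
          cpumask_t mask;
          unsigned long i;

          cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr);

          /* Fast path: this thread's affinity is unchanged since the last put */
          if (cpumask_equal(vcpu->arch.sched_cpus, vcpu->arch.pre_sched_cpus))
                  return;

          /* Slow path: rebuild the VM-wide union under sched_lock */
          spin_lock(&kvm->arch.sched_lock);
          cpumask_clear(&mask);
          kvm_for_each_vcpu(i, tmp, kvm)
                  cpumask_or(&mask, &mask, tmp->arch.sched_cpus);
          cpumask_copy(kvm->arch.sched_cpus, &mask);
          spin_unlock(&kvm->arch.sched_lock);
  }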
Signed-off-by: Quan Zhou <zhouquan65@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/include/asm/kvm_host.h    |  5 +++
 arch/arm64/kvm/arm.c                 |  6 ++++
 arch/arm64/kvm/hisilicon/hisi_virt.c | 54 ++++++++++++++++++++++++++++
 arch/arm64/kvm/hisilicon/hisi_virt.h |  7 ++++
 4 files changed, 72 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f108ebe4c2a0..660482b939ee 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -279,6 +279,11 @@ struct kvm_arch { * the associated pKVM instance in the hypervisor. */ struct kvm_protected_vm pkvm; + +#ifdef CONFIG_KVM_HISI_VIRT + spinlock_t sched_lock; + cpumask_var_t sched_cpus; /* Union of all vcpu's cpus_ptr */ +#endif };
struct kvm_vcpu_fault_info { diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 68772a732849..26b6a6c0f1e4 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -145,6 +145,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret;
+ ret = kvm_sched_affinity_vm_init(kvm); + if (ret) + return ret; + mutex_init(&kvm->arch.config_lock);
#ifdef CONFIG_LOCKDEP @@ -205,6 +209,8 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) */ void kvm_arch_destroy_vm(struct kvm *kvm) { + kvm_sched_affinity_vm_destroy(kvm); + bitmap_free(kvm->arch.pmu_filter); free_cpumask_var(kvm->arch.supported_cpus);
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index ac12fc54a6b4..d9975ac1bd0b 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -197,10 +197,42 @@ void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu)
void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) { + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *tmp; + cpumask_t mask; + unsigned long i; + + /* Don't bother on old hardware */ if (!kvm_dvmbm_support) return;
cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr); + + if (likely(cpumask_equal(vcpu->arch.sched_cpus, + vcpu->arch.pre_sched_cpus))) + return; + + /* Re-calculate sched_cpus for this VM */ + spin_lock(&kvm->arch.sched_lock); + + cpumask_clear(&mask); + kvm_for_each_vcpu(i, tmp, kvm) { + /* + * We may get the stale sched_cpus if another thread + * is concurrently changing its affinity. It'll + * eventually go through vcpu_load() and we rely on + * the last sched_lock holder to make things correct. + */ + cpumask_or(&mask, &mask, tmp->arch.sched_cpus); + } + + if (cpumask_equal(kvm->arch.sched_cpus, &mask)) + goto out_unlock; + + cpumask_copy(kvm->arch.sched_cpus, &mask); + +out_unlock: + spin_unlock(&kvm->arch.sched_lock); }
void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) @@ -210,3 +242,25 @@ void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu)
cpumask_copy(vcpu->arch.pre_sched_cpus, vcpu->arch.sched_cpus); } + +int kvm_sched_affinity_vm_init(struct kvm *kvm) +{ + if (!kvm_dvmbm_support) + return 0; + + spin_lock_init(&kvm->arch.sched_lock); + if (!zalloc_cpumask_var(&kvm->arch.sched_cpus, GFP_ATOMIC)) + return -ENOMEM; + if (!kvm->arch.sched_cpus) + return -ENOMEM; + + return 0; +} + +void kvm_sched_affinity_vm_destroy(struct kvm *kvm) +{ + if (!kvm_dvmbm_support) + return; + + free_cpumask_var(kvm->arch.sched_cpus); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 8d8ef6aa165a..3de270ad2da5 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -26,6 +26,8 @@ bool hisi_dvmbm_supported(void);
int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); +int kvm_sched_affinity_vm_init(struct kvm *kvm); +void kvm_sched_affinity_vm_destroy(struct kvm *kvm); void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu); void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu); #else @@ -44,6 +46,11 @@ static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) return 0; } static inline void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) {} +static inline int kvm_sched_affinity_vm_init(struct kvm *kvm) +{ + return 0; +} +static inline void kvm_sched_affinity_vm_destroy(struct kvm *kvm) {} static inline void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) {} static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} #endif /* CONFIG_KVM_HISI_VIRT */
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
----------------------------------------------------
Implement the DVMBM capability. Each time a vCPU is loaded, we re-calculate the VM-wide sched_cpus; if it has changed, we kick all other vCPUs out to reload the latest LSUDVMBM value into the register. A new request, KVM_REQ_RELOAD_TLBI_DVMBM, is added to implement this.
Otherwise, if sched_cpus is not changed by this single vCPU, we simply reload the LSUDVMBM value into the register to keep its contents correct; nothing else needs to be done.
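To make the register encoding easier to follow, here is a worked single-die example (the Aff3 value and CPU layout are made up, and the example_* helper is hypothetical): suppose every pCPU in the VM's sched_cpus sits on one die with Aff3 == 0x23, covering cluster 0 cores 0-3 and cluster 1 cores 0-1. Using the macros added in hisi_virt.h, the LSUDVMBM_EL2 value works out as:

  static u64 example_lsudvmbm_one_die(void)
  {
          u64 val;

          val = DVMBM_RANGE_ONE_DIE << DVMBM_RANGE_SHIFT; /* bits [63:62] = 0b00 */
          val |= 0x23ULL << DVMBM_DIE1_SHIFT;             /* DIE1 = the die's Aff3 */

          /* one bit per core: bit number = Aff2 * 4 + Aff1 */
          val |= GENMASK_ULL(3, 0);                       /* cluster 0, cores 0-3 */
          val |= GENMASK_ULL(5, 4);                       /* cluster 1, cores 0-1 */

          return val;                                     /* (0x23ULL << 53) | 0x3f */
  }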
Signed-off-by: Quan Zhou <zhouquan65@huawei.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/include/asm/kvm_host.h    |  16 ++--
 arch/arm64/kvm/arm.c                 |   3 +
 arch/arm64/kvm/hisilicon/hisi_virt.c | 114 ++++++++++++++++++++++++++-
 arch/arm64/kvm/hisilicon/hisi_virt.h |  29 +++++++
 4 files changed, 154 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 660482b939ee..44c6110172db 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -43,13 +43,14 @@
#define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) -#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) -#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3) -#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4) -#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5) -#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6) -#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7) +#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) +#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3) +#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4) +#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5) +#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6) +#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7) +#define KVM_REQ_RELOAD_TLBI_DVMBM KVM_ARCH_REQ(8)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ KVM_DIRTY_LOG_INITIALLY_SET) @@ -283,6 +284,7 @@ struct kvm_arch { #ifdef CONFIG_KVM_HISI_VIRT spinlock_t sched_lock; cpumask_var_t sched_cpus; /* Union of all vcpu's cpus_ptr */ + u64 tlbi_dvmbm; #endif };
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 26b6a6c0f1e4..2de4d39fde74 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -836,6 +836,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_dirty_ring_check_request(vcpu)) return 0; + + if (kvm_check_request(KVM_REQ_RELOAD_TLBI_DVMBM, vcpu)) + kvm_hisi_reload_lsudvmbm(vcpu->kvm); }
return 1; diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index d9975ac1bd0b..9fd1d5c08d7e 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -195,6 +195,96 @@ void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) free_cpumask_var(vcpu->arch.pre_sched_cpus); }
+static void __kvm_write_lsudvmbm(struct kvm *kvm) +{ + write_sysreg_s(kvm->arch.tlbi_dvmbm, SYS_LSUDVMBM_EL2); +} + +static void kvm_write_lsudvmbm(struct kvm *kvm) +{ + spin_lock(&kvm->arch.sched_lock); + __kvm_write_lsudvmbm(kvm); + spin_unlock(&kvm->arch.sched_lock); +} + +static int kvm_dvmbm_get_dies_info(struct kvm *kvm, u64 *vm_aff3s, int size) +{ + int num = 0, cpu; + + for_each_cpu(cpu, kvm->arch.sched_cpus) { + bool found = false; + u64 aff3; + int i; + + if (num >= size) + break; + + aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3); + for (i = 0; i < num; i++) { + if (vm_aff3s[i] == aff3) { + found = true; + break; + } + } + + if (!found) + vm_aff3s[num++] = aff3; + } + + return num; +} + +static void kvm_update_vm_lsudvmbm(struct kvm *kvm) +{ + u64 mpidr, aff3, aff2, aff1; + u64 vm_aff3s[DVMBM_MAX_DIES]; + u64 val; + int cpu, nr_dies; + + nr_dies = kvm_dvmbm_get_dies_info(kvm, vm_aff3s, DVMBM_MAX_DIES); + if (nr_dies > 2) { + val = DVMBM_RANGE_ALL_DIES << DVMBM_RANGE_SHIFT; + goto out_update; + } + + if (nr_dies == 1) { + val = DVMBM_RANGE_ONE_DIE << DVMBM_RANGE_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_SHIFT; + + /* fulfill bits [52:0] */ + for_each_cpu(cpu, kvm->arch.sched_cpus) { + mpidr = cpu_logical_map(cpu); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + + val |= 1ULL << (aff2 * 4 + aff1); + } + + goto out_update; + } + + /* nr_dies == 2 */ + val = DVMBM_RANGE_TWO_DIES << DVMBM_RANGE_SHIFT | + DVMBM_GRAN_CLUSTER << DVMBM_GRAN_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_SHIFT | + vm_aff3s[1] << DVMBM_DIE2_SHIFT; + + /* and fulfill bits [43:0] */ + for_each_cpu(cpu, kvm->arch.sched_cpus) { + mpidr = cpu_logical_map(cpu); + aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + + if (aff3 == vm_aff3s[0]) + val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT); + else + val |= 1ULL << (aff2 + DVMBM_DIE2_CLUSTER_SHIFT); + } + +out_update: + kvm->arch.tlbi_dvmbm = val; +} + void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; @@ -209,8 +299,10 @@ void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr);
if (likely(cpumask_equal(vcpu->arch.sched_cpus, - vcpu->arch.pre_sched_cpus))) + vcpu->arch.pre_sched_cpus))) { + kvm_write_lsudvmbm(kvm); return; + }
/* Re-calculate sched_cpus for this VM */ spin_lock(&kvm->arch.sched_lock); @@ -231,7 +323,17 @@ void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu)
cpumask_copy(kvm->arch.sched_cpus, &mask);
+ kvm_flush_remote_tlbs(kvm); + + /* + * Re-calculate LSUDVMBM_EL2 for this VM and kick all vcpus + * out to reload the LSUDVMBM configuration. + */ + kvm_update_vm_lsudvmbm(kvm); + kvm_make_all_cpus_request(kvm, KVM_REQ_RELOAD_TLBI_DVMBM); + out_unlock: + __kvm_write_lsudvmbm(kvm); spin_unlock(&kvm->arch.sched_lock); }
@@ -264,3 +366,13 @@ void kvm_sched_affinity_vm_destroy(struct kvm *kvm)
free_cpumask_var(kvm->arch.sched_cpus); } + +void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) +{ + if (WARN_ON_ONCE(!kvm_dvmbm_support)) + return; + + preempt_disable(); + kvm_write_lsudvmbm(kvm); + preempt_enable(); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 3de270ad2da5..4e162b7f6688 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -20,6 +20,33 @@ enum hisi_cpu_type { #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) #define LSUDVM_CTLR_EL2_MASK BIT_ULL(0)
+/* + * MPIDR_EL1 layout on HIP09 + * + * Aff3[7:3] - socket ID [0-15] + * Aff3[2:0] - die ID [1,3] + * Aff2 - cluster ID [0-9] + * Aff1 - core ID [0-3] + * Aff0 - thread ID [0,1] + */ + +#define SYS_LSUDVMBM_EL2 sys_reg(3, 4, 15, 7, 5) +#define DVMBM_RANGE_SHIFT 62 +#define DVMBM_RANGE_ONE_DIE 0ULL +#define DVMBM_RANGE_TWO_DIES 1ULL +#define DVMBM_RANGE_ALL_DIES 3ULL + +#define DVMBM_GRAN_SHIFT 61 +#define DVMBM_GRAN_CLUSTER 0ULL +#define DVMBM_GRAN_DIE 1ULL + +#define DVMBM_DIE1_SHIFT 53 +#define DVMBM_DIE2_SHIFT 45 +#define DVMBM_DIE1_CLUSTER_SHIFT 22 +#define DVMBM_DIE2_CLUSTER_SHIFT 0 + +#define DVMBM_MAX_DIES 32 + void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); @@ -30,6 +57,7 @@ int kvm_sched_affinity_vm_init(struct kvm *kvm); void kvm_sched_affinity_vm_destroy(struct kvm *kvm); void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu); void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu); +void kvm_hisi_reload_lsudvmbm(struct kvm *kvm); #else static inline void probe_hisi_cpu_type(void) {} static inline bool hisi_ncsnp_supported(void) @@ -53,6 +81,7 @@ static inline int kvm_sched_affinity_vm_init(struct kvm *kvm) static inline void kvm_sched_affinity_vm_destroy(struct kvm *kvm) {} static inline void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) {} static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} +static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #endif /* CONFIG_KVM_HISI_VIRT */
#endif /* __HISI_VIRT_H__ */
From: Xiang Chen <chenxiang66@hisilicon.com>
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
----------------------------------------------------
For the DVMBM feature, the MN requires the physical cluster ID, while it is currently filled with the logical cluster ID. In configurations where the physical cluster ID is not equal to the logical cluster ID, such as on PG boards, this causes issues when DVMBM is enabled.
To avoid the issue, translate the logical cluster ID to the physical cluster ID when updating LSUDVMBM.
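As an illustration (the board layout is made up): if logical cluster 2 of a die is backed by physical cluster 6 on a PG board, the remapping table built from the MN ECO register gives g_die_pg[die][2] == 6, so the single-die encoding now sets bit (6 * 4 + Aff1) instead of (2 * 4 + Aff1):

  /* logical Aff2 from MPIDR_EL1 is remapped before being encoded */
  phy_aff2 = g_die_pg[socket_id * die_num + die_id][aff2];
  val |= 1ULL << (phy_aff2 * 4 + aff1);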
Signed-off-by: Xiang Chen <chenxiang66@hisilicon.com>
Signed-off-by: Cai Jian <caijian11@h-partners.com>
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/kvm/arm.c                 |   3 +
 arch/arm64/kvm/hisilicon/hisi_virt.c | 151 +++++++++++++++++++++++++--
 arch/arm64/kvm/hisilicon/hisi_virt.h |  16 +++
 3 files changed, 163 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 2de4d39fde74..09682bbcbdf5 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2448,6 +2448,9 @@ static __init int kvm_arm_init(void) kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled");
+ if (kvm_dvmbm_support) + kvm_get_pg_cfg(); + in_hyp_mode = is_kernel_in_hyp_mode();
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 9fd1d5c08d7e..d9f6ce83ac39 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -234,12 +234,98 @@ static int kvm_dvmbm_get_dies_info(struct kvm *kvm, u64 *vm_aff3s, int size) return num; }
+static u32 socket_num, die_num; + +static u32 kvm_get_socket_num(void) +{ + int socket_id[MAX_PG_CFG_SOCKETS], cpu; + u32 num = 0; + + for_each_cpu(cpu, cpu_possible_mask) { + bool found = false; + u64 aff3, socket; + int i; + + aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3); + /* aff3[7:3]: socket ID */ + socket = (aff3 & SOCKET_ID_MASK) >> SOCKET_ID_SHIFT; + for (i = 0; i < num; i++) { + if (socket_id[i] == socket) { + found = true; + break; + } + } + if (!found) + socket_id[num++] = socket; + } + return num; +} + +static u32 kvm_get_die_num(void) +{ + int die_id[MAX_DIES_PER_SOCKET], cpu; + u32 num = 0; + + for_each_cpu(cpu, cpu_possible_mask) { + bool found = false; + u64 aff3, die; + int i; + + aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3); + /* aff3[2:0]: die ID */ + die = aff3 & DIE_ID_MASK; + for (i = 0; i < num; i++) { + if (die_id[i] == die) { + found = true; + break; + } + } + if (!found) + die_id[num++] = die; + } + return num; +} + +static u32 g_die_pg[MAX_PG_CFG_SOCKETS * MAX_DIES_PER_SOCKET][MAX_CLUSTERS_PER_DIE]; + +static void kvm_get_die_pg(unsigned long pg_cfg, int socket_id, int die_id) +{ + u32 pg_num = 0, i, j; + u32 pg_flag[MAX_CLUSTERS_PER_DIE]; + u32 die_tmp = socket_id * die_num + die_id; + + for (i = 0; i < MAX_CLUSTERS_PER_DIE; i++) { + if (test_bit(i, &pg_cfg)) + pg_num++; + g_die_pg[die_tmp][i] = i; + pg_flag[i] = 0; + } + + for (i = 0; i < MAX_CLUSTERS_PER_DIE - pg_num; i++) { + if (test_bit(i, &pg_cfg)) { + for (j = 0; j < pg_num; j++) { + u32 cluster_bak = MAX_CLUSTERS_PER_DIE - + pg_num + j; + + if (!test_bit(cluster_bak, &pg_cfg) && + !pg_flag[cluster_bak]) { + pg_flag[cluster_bak] = 1; + g_die_pg[die_tmp][i] = cluster_bak; + g_die_pg[die_tmp][cluster_bak] = i; + break; + } + } + } + } +} + static void kvm_update_vm_lsudvmbm(struct kvm *kvm) { - u64 mpidr, aff3, aff2, aff1; + u64 mpidr, aff3, aff2, aff1, phy_aff2; u64 vm_aff3s[DVMBM_MAX_DIES]; u64 val; int cpu, nr_dies; + u32 socket_id, die_id;
nr_dies = kvm_dvmbm_get_dies_info(kvm, vm_aff3s, DVMBM_MAX_DIES); if (nr_dies > 2) { @@ -254,10 +340,18 @@ static void kvm_update_vm_lsudvmbm(struct kvm *kvm) /* fulfill bits [52:0] */ for_each_cpu(cpu, kvm->arch.sched_cpus) { mpidr = cpu_logical_map(cpu); + aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); - - val |= 1ULL << (aff2 * 4 + aff1); + socket_id = (aff3 & SOCKET_ID_MASK) >> SOCKET_ID_SHIFT; + die_id = (aff3 & DIE_ID_MASK) >> DIE_ID_SHIFT; + if (die_id == TOTEM_B_ID) + die_id = 0; + else + die_id = 1; + + phy_aff2 = g_die_pg[socket_id * die_num + die_id][aff2]; + val |= 1ULL << (phy_aff2 * 4 + aff1); }
goto out_update; @@ -274,11 +368,20 @@ static void kvm_update_vm_lsudvmbm(struct kvm *kvm) mpidr = cpu_logical_map(cpu); aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); - - if (aff3 == vm_aff3s[0]) - val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT); + socket_id = (aff3 & SOCKET_ID_MASK) >> SOCKET_ID_SHIFT; + die_id = (aff3 & DIE_ID_MASK) >> DIE_ID_SHIFT; + if (die_id == TOTEM_B_ID) + die_id = 0; else - val |= 1ULL << (aff2 + DVMBM_DIE2_CLUSTER_SHIFT); + die_id = 1; + + if (aff3 == vm_aff3s[0]) { + phy_aff2 = g_die_pg[socket_id * die_num + die_id][aff2]; + val |= 1ULL << (phy_aff2 + DVMBM_DIE1_CLUSTER_SHIFT); + } else { + phy_aff2 = g_die_pg[socket_id * die_num + die_id][aff2]; + val |= 1ULL << (phy_aff2 + DVMBM_DIE2_CLUSTER_SHIFT); + } }
out_update: @@ -345,6 +448,40 @@ void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) cpumask_copy(vcpu->arch.pre_sched_cpus, vcpu->arch.sched_cpus); }
+void kvm_get_pg_cfg(void) +{ + void __iomem *mn_base; + u32 i, j; + u32 pg_cfgs[MAX_PG_CFG_SOCKETS * MAX_DIES_PER_SOCKET]; + u64 mn_phy_base; + u32 val; + + socket_num = kvm_get_socket_num(); + die_num = kvm_get_die_num(); + + for (i = 0; i < socket_num; i++) { + for (j = 0; j < die_num; j++) { + /* + * totem B means the first CPU DIE within a SOCKET + * totem A means the second one. + */ + mn_phy_base = (j == 0) ? TB_MN_BASE : TA_MN_BASE; + mn_phy_base += CHIP_ADDR_OFFSET(i); + mn_phy_base += MN_ECO0_OFFSET; + + mn_base = ioremap(mn_phy_base, 4); + if (!mn_base) { + kvm_info("MN base addr ioremap failed!\n"); + return; + } + val = readl_relaxed(mn_base); + pg_cfgs[j + i * die_num] = val & 0xff; + kvm_get_die_pg(pg_cfgs[j + i * die_num], i, j); + iounmap(mn_base); + } + } +} + int kvm_sched_affinity_vm_init(struct kvm *kvm) { if (!kvm_dvmbm_support) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 4e162b7f6688..e88c0eaf52b7 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -20,6 +20,21 @@ enum hisi_cpu_type { #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) #define LSUDVM_CTLR_EL2_MASK BIT_ULL(0)
+#define MAX_CLUSTERS_PER_DIE 8 +#define TB_MN_BASE 0x00C6067f0000 +#define TA_MN_BASE 0x0046067F0000 +#define CHIP_ADDR_OFFSET(_chip) (((((_chip) >> 3) & 0x1) * 0x80000000000) + \ + ((((_chip) >> 2) & 0x1) * (0x100000000000)) + \ + (((_chip) & 0x3) * 0x200000000000)) +#define MAX_PG_CFG_SOCKETS 4 +#define MAX_DIES_PER_SOCKET 2 +#define MN_ECO0_OFFSET 0xc00 +#define SOCKET_ID_MASK 0xf8 +#define SOCKET_ID_SHIFT 3 +#define DIE_ID_MASK 0x7 +#define DIE_ID_SHIFT 0 +#define TOTEM_B_ID 3 + /* * MPIDR_EL1 layout on HIP09 * @@ -50,6 +65,7 @@ enum hisi_cpu_type { void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +void kvm_get_pg_cfg(void);
int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu);