From: Xiangkai Yang <yangxiangkai@huawei.com>

virtCCA feature

Xiangkai Yang (1):
  cvm and part1

 arch/arm64/Kconfig                          |   8 +
 arch/arm64/configs/openeuler_defconfig      |   5 +-
 arch/arm64/include/asm/kvm_emulate.h        |  18 ++
 arch/arm64/include/asm/kvm_host.h           |  15 +-
 arch/arm64/include/asm/kvm_tmm.h            |  52 ++++-
 arch/arm64/include/asm/set_memory.h         |   1 +
 arch/arm64/include/uapi/asm/kvm.h           |   4 +
 arch/arm64/kernel/Makefile                  |   1 +
 arch/arm64/kvm/Kconfig                      |   6 +-
 arch/arm64/kvm/Makefile                     |   3 +
 arch/arm64/kvm/arch_timer.c                 |  93 ++++++++
 arch/arm64/kvm/arm.c                        | 134 +++++++++++-
 arch/arm64/kvm/guest.c                      |   5 +
 arch/arm64/kvm/mmio.c                       |  14 +-
 arch/arm64/kvm/mmu.c                        |  10 +
 arch/arm64/kvm/pmu-emul.c                   |   9 +
 arch/arm64/kvm/psci.c                       |  10 +-
 arch/arm64/kvm/reset.c                      |  11 +
 arch/arm64/kvm/vgic/vgic-v3.c               |  14 +-
 arch/arm64/kvm/vgic/vgic.c                  |  52 ++++-
 arch/arm64/mm/init.c                        |   3 +
 arch/arm64/mm/mmu.c                         |   5 +-
 arch/arm64/mm/pageattr.c                    |   3 +
 drivers/iommu/arm/arm-smmu-v3/Makefile      |   1 +
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 178 ++++++++++++++-
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h |  16 ++
 drivers/irqchip/irq-gic-v3-its.c            | 231 ++++++++++++++++++--
 drivers/perf/arm_pmu.c                      |  17 ++
 include/kvm/arm_arch_timer.h                |   4 +
 include/linux/iommu.h                       |   4 +
 include/linux/iopoll.h                      |  38 ++++
 include/linux/kvm_host.h                    |  22 ++
 include/linux/perf/arm_pmu.h                |   4 +
 include/uapi/linux/kvm.h                    |  13 ++
 kernel/dma/swiotlb.c                        |  16 ++
 virt/kvm/kvm_main.c                         |   4 +
 36 files changed, 965 insertions(+), 59 deletions(-)
From: Xiangkai Yang <yangxiangkai@huawei.com>

virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IADD42

--------------------------------

virtCCA feature

Signed-off-by: Xiangkai Yang <yangxiangkai@huawei.com>
---
 arch/arm64/Kconfig                          |   8 +
 arch/arm64/configs/openeuler_defconfig      |   5 +-
 arch/arm64/include/asm/kvm_emulate.h        |  18 ++
 arch/arm64/include/asm/kvm_host.h           |  15 +-
 arch/arm64/include/asm/kvm_tmm.h            |  52 ++++-
 arch/arm64/include/asm/set_memory.h         |   1 +
 arch/arm64/include/uapi/asm/kvm.h           |   4 +
 arch/arm64/kernel/Makefile                  |   1 +
 arch/arm64/kvm/Kconfig                      |   6 +-
 arch/arm64/kvm/Makefile                     |   3 +
 arch/arm64/kvm/arch_timer.c                 |  93 ++++++++
 arch/arm64/kvm/arm.c                        | 134 +++++++++++-
 arch/arm64/kvm/guest.c                      |   5 +
 arch/arm64/kvm/mmio.c                       |  14 +-
 arch/arm64/kvm/mmu.c                        |  10 +
 arch/arm64/kvm/pmu-emul.c                   |   9 +
 arch/arm64/kvm/psci.c                       |  10 +-
 arch/arm64/kvm/reset.c                      |  11 +
 arch/arm64/kvm/vgic/vgic-v3.c               |  14 +-
 arch/arm64/kvm/vgic/vgic.c                  |  52 ++++-
 arch/arm64/mm/init.c                        |   3 +
 arch/arm64/mm/mmu.c                         |   5 +-
 arch/arm64/mm/pageattr.c                    |   3 +
 drivers/iommu/arm/arm-smmu-v3/Makefile      |   1 +
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 178 ++++++++++++++-
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h |  16 ++
 drivers/irqchip/irq-gic-v3-its.c            | 231 ++++++++++++++++++--
 drivers/perf/arm_pmu.c                      |  17 ++
 include/kvm/arm_arch_timer.h                |   4 +
 include/linux/iommu.h                       |   4 +
 include/linux/iopoll.h                      |  38 ++++
 include/linux/kvm_host.h                    |  22 ++
 include/linux/perf/arm_pmu.h                |   4 +
 include/uapi/linux/kvm.h                    |  13 ++
 kernel/dma/swiotlb.c                        |  16 ++
 virt/kvm/kvm_main.c                         |   4 +
 36 files changed, 965 insertions(+), 59 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a162b83bd7..501ec560a9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2576,6 +2576,14 @@ config COMPAT
 	def_bool y
 	depends on AARCH32_EL0 || ARM64_ILP32
 
+config HISI_VIRTCCA_GUEST
+	bool "Enable cvm guest run"
+	depends on DMA_RESTRICTED_POOL
+	help
+	  Support VIRTCCA CVM guest based on S-EL2
+
+	  If unsure, say N.
+
 menu "Power management options"
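The guest-side implementation (arch/arm64/kernel/virtcca_cvm_guest.c, wired up in the kernel Makefile hunk below) is not part of this first portion of the series. A minimal sketch of the kind of helper the later hunks rely on (is_virtcca_cvm_world() is called from the mm/ and ITS changes); everything beyond that function name is an assumption, not the actual file contents:

/* Hypothetical sketch only, not the real virtcca_cvm_guest.c. */
#include <linux/jump_label.h>
#include <linux/init.h>

static DEFINE_STATIC_KEY_FALSE(virtcca_cvm_guest_key);

/* Assumed to run early in boot once firmware/TMM reports a confidential VM. */
void __init virtcca_cvm_guest_enable(void)
{
	static_branch_enable(&virtcca_cvm_guest_key);
}

/* Used by the mm/ and irq-gic-v3-its hunks to switch to shared allocations. */
bool is_virtcca_cvm_world(void)
{
	return static_branch_unlikely(&virtcca_cvm_guest_key);
}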
source "kernel/power/Kconfig" diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 17338d53a8..072d26fcc2 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -595,6 +595,7 @@ CONFIG_DMI=y # end of Boot options
CONFIG_COMPAT=y +CONFIG_HISI_VIRTCCA_GUEST=y
# # Power management options @@ -758,7 +759,7 @@ CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_KVM_HISI_VIRT=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=y -CONFIG_CVM_HOST=y +CONFIG_HISI_VIRTCCA_HOST=y # CONFIG_NVHE_EL2_DEBUG is not set CONFIG_KVM_ARM_MULTI_LPI_TRANSLATE_CACHE=y CONFIG_ARCH_VCPU_STAT=y @@ -7821,7 +7822,7 @@ CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y CONFIG_SWIOTLB=y # CONFIG_SWIOTLB_DYNAMIC is not set CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y -# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_RESTRICTED_POOL=y CONFIG_DMA_NONCOHERENT_MMAP=y CONFIG_DMA_COHERENT_POOL=y CONFIG_DMA_DIRECT_REMAP=y diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 589ee66e64..2293caab9f 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -639,4 +639,22 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
kvm_write_cptr_el2(val); } + +#ifdef CONFIG_HISI_VIRTCCA_HOST +static inline bool kvm_is_virtcca_cvm(struct kvm *kvm) +{ + if (static_branch_unlikely(&virtcca_cvm_is_available)) + return kvm->arch.is_virtcca_cvm; + return false; +} + +static inline enum virtcca_cvm_state virtcca_cvm_state(struct kvm *kvm) +{ + struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; + + if (!virtcca_cvm) + return 0; + return READ_ONCE(virtcca_cvm->state); +} +#endif #endif /* __ARM64_KVM_EMULATE_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 66c0bb96f0..ac8115098e 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -27,7 +27,7 @@ #include <asm/fpsimd.h> #include <asm/kvm.h> #include <asm/kvm_asm.h> -#ifdef CONFIG_CVM_HOST +#ifdef CONFIG_HISI_VIRTCCA_HOST #include <asm/kvm_tmm.h> #endif
@@ -292,9 +292,12 @@ struct kvm_arch { u64 tlbi_dvmbm; #endif
-#ifdef CONFIG_CVM_HOST - struct cvm cvm; - bool is_cvm; +#ifdef CONFIG_HISI_VIRTCCA_HOST + union { + struct cvm cvm; + struct virtcca_cvm *virtcca_cvm; + }; + bool is_virtcca_cvm; #endif };
@@ -622,8 +625,8 @@ struct kvm_vcpu_arch { cpumask_var_t pre_sched_cpus; #endif
-#ifdef CONFIG_CVM_HOST - struct cvm_tec tec; +#ifdef CONFIG_HISI_VIRTCCA_HOST + struct virtcca_cvm_tec tec; #endif };
diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h index f70d73be03..2fd6d2bdb9 100644 --- a/arch/arm64/include/asm/kvm_tmm.h +++ b/arch/arm64/include/asm/kvm_tmm.h @@ -7,8 +7,11 @@
#include <uapi/linux/kvm.h>
-enum cvm_state { - CVM_STATE_NONE, +#define CVM_MSI_ORIG_IOVA 0x8000000 +#define CVM_MSI_IOVA_OFFSET (-0x1000000) + +enum virtcca_cvm_state { + CVM_STATE_NONE = 1, CVM_STATE_NEW, CVM_STATE_ACTIVE, CVM_STATE_DYING @@ -36,7 +39,7 @@ struct tmi_cvm_params { };
struct cvm { - enum cvm_state state; + enum virtcca_cvm_state state; u32 cvm_vmid; u64 rd; u64 loader_start; @@ -48,33 +51,62 @@ struct cvm { bool is_cvm; };
+struct virtcca_cvm { + enum virtcca_cvm_state state; + u32 cvm_vmid; + u64 rd; + u64 loader_start; + u64 image_end; + u64 initrd_start; + u64 dtb_end; + u64 ram_size; + struct kvm_numa_info numa_info; + struct tmi_cvm_params *params; + bool is_mapped; +}; + /* * struct cvm_tec - Additional per VCPU data for a CVM */ -struct cvm_tec { +struct virtcca_cvm_tec { u64 tec; bool tec_created; void *tec_run; };
+struct cvm_ttt_addr { + struct list_head list; + u64 addr; +}; + +struct iommu_group {}; + int kvm_init_tmm(void); int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); -int kvm_init_cvm_vm(struct kvm *kvm); void kvm_destroy_cvm(struct kvm *kvm); -int kvm_create_tec(struct kvm_vcpu *vcpu); +int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu); void kvm_destroy_tec(struct kvm_vcpu *vcpu); int kvm_tec_enter(struct kvm_vcpu *vcpu); int handle_cvm_exit(struct kvm_vcpu *vcpu, int rec_run_status); int kvm_arm_create_cvm(struct kvm *kvm); void kvm_free_rd(struct kvm *kvm); -int cvm_create_rd(struct kvm *kvm); int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target); -int kvm_arch_tec_init(struct kvm_vcpu *vcpu);
void kvm_cvm_unmap_destroy_range(struct kvm *kvm);
-#define CVM_TTT_BLOCK_LEVEL 2 -#define CVM_TTT_MAX_LEVEL 3 +int kvm_cvm_map_range(struct kvm *kvm); +int cvm_arm_smmu_domain_set_kvm(struct iommu_group *group); +int kvm_cvm_map_unmap_ipa_range(struct kvm *kvm, phys_addr_t ipa_base, phys_addr_t pa, + unsigned long map_size, uint32_t is_map); +int kvm_cvm_map_ipa_mmio(struct kvm *kvm, phys_addr_t ipa_base, + phys_addr_t pa, unsigned long map_size); + +#define CVM_TTT_BLOCK_LEVEL 2 +#define CVM_TTT_MAX_LEVEL 3 + +#define CVM_MAP_IPA_RAM 1 +#define CVM_MAP_IPA_SMMU 2 +#define CVM_MAP_IPA_UNPROTECTED 4
#define CVM_PAGE_SHIFT 12 #define CVM_PAGE_SIZE BIT(CVM_PAGE_SHIFT) diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h index 0f740b7811..2031b31c05 100644 --- a/arch/arm64/include/asm/set_memory.h +++ b/arch/arm64/include/asm/set_memory.h @@ -4,6 +4,7 @@ #define _ASM_ARM64_SET_MEMORY_H
#include <asm-generic/set_memory.h> +#include <asm/virtcca_cvm_guest.h>
bool can_set_direct_map(void); #define can_set_direct_map can_set_direct_map diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index f7ddd73a8c..97941e582d 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -110,6 +110,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ #define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */ +#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */
struct kvm_vcpu_init { __u32 target; @@ -415,6 +416,9 @@ enum { #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 #define KVM_DEV_ARM_ITS_CTRL_RESET 4
+#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256 0 +#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512 1 + /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 #define KVM_ARM_VCPU_PMU_V3_IRQ 0 diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 21ef9c21a4..cccf8332ed 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -81,6 +81,7 @@ obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o obj-$(CONFIG_ARM64_ILP32) += vdso-ilp32/ obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o obj-$(CONFIG_IPI_AS_NMI) += ipi_nmi.o +obj-$(CONFIG_HISI_VIRTCCA_GUEST) += virtcca_cvm_guest.o CFLAGS_patch-scs.o += -mbranch-protection=none
# Force dependency (vdso*-wrap.S includes vdso.so through incbin) diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 1fa6fba607..52edbd7f63 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -49,11 +49,11 @@ menuconfig KVM
If unsure, say N.
-config CVM_HOST
-	bool "Enable cvm host feature"
+config HISI_VIRTCCA_HOST
+	bool "Enable virtcca cvm host feature"
 	depends on KVM
 	help
-	  Support CVM based on S-EL2
+	  Support VIRTCCA CVM based on S-EL2
If unsure, say N.
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 952eee572e..eadf41417f 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -24,6 +24,9 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o pvsched.o \
kvm-$(CONFIG_VIRT_PLAT_DEV) += vgic/shadow_dev.o kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o +kvm-$(CONFIG_HISI_VIRTCCA_HOST) += tmi.o +kvm-$(CONFIG_HISI_VIRTCCA_HOST) += virtcca_cvm.o +kvm-$(CONFIG_HISI_VIRTCCA_HOST) += virtcca_cvm_exit.o obj-$(CONFIG_KVM_HISI_VIRT) += hisilicon/
always-y := hyp_constants.h hyp-constants.s diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 43957fce50..3f1129d3df 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -17,6 +17,7 @@ #include <asm/kvm_emulate.h> #include <asm/kvm_hyp.h> #include <asm/kvm_nested.h> +#include <asm/kvm_tmi.h>
#include <kvm/arm_vgic.h> #include <kvm/arm_arch_timer.h> @@ -175,8 +176,77 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval) } }
+#ifdef CONFIG_HISI_VIRTCCA_HOST +static bool cvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx) +{ + return timer_ctx && + ((timer_get_ctl(timer_ctx) & + (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE); +} + +void kvm_cvm_timers_update(struct kvm_vcpu *vcpu) +{ + int i; + u64 cval, now; + bool status, level; + struct arch_timer_context *timer; + struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu; + + for (i = 0; i < NR_KVM_TIMERS; i++) { + timer = &arch_timer->timers[i]; + + if (!timer->loaded) { + if (!cvm_timer_irq_can_fire(timer)) + continue; + cval = timer_get_cval(timer); + now = kvm_phys_timer_read() - timer_get_offset(timer); + level = (cval <= now); + kvm_timer_update_irq(vcpu, level, timer); + } else { + status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT; + level = cvm_timer_irq_can_fire(timer) && status; + if (level != timer->irq.level) + kvm_timer_update_irq(vcpu, level, timer); + } + } +} + +static void set_cvm_timers_loaded(struct kvm_vcpu *vcpu, bool loaded) +{ + int i; + struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu; + + for (i = 0; i < NR_KVM_TIMERS; i++) { + struct arch_timer_context *timer = &arch_timer->timers[i]; + + timer->loaded = loaded; + } +} + +static void kvm_timer_blocking(struct kvm_vcpu *vcpu); +static void kvm_timer_unblocking(struct kvm_vcpu *vcpu); + +static inline void cvm_vcpu_load_timer_callback(struct kvm_vcpu *vcpu) +{ + kvm_cvm_timers_update(vcpu); + kvm_timer_unblocking(vcpu); + set_cvm_timers_loaded(vcpu, true); +} + +static inline void cvm_vcpu_put_timer_callback(struct kvm_vcpu *vcpu) +{ + set_cvm_timers_loaded(vcpu, false); + if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu))) + kvm_timer_blocking(vcpu); +} +#endif + static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset) { +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (kvm_is_virtcca_cvm(ctxt->vcpu->kvm)) + return; +#endif if (!ctxt->offset.vm_offset) { WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt)); return; @@ -883,6 +953,13 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = vcpu_timer(vcpu); struct timer_map map;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + cvm_vcpu_load_timer_callback(vcpu); + return; + } +#endif + if (unlikely(!timer->enabled)) return;
@@ -981,6 +1058,13 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = vcpu_timer(vcpu); struct timer_map map;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + cvm_vcpu_put_timer_callback(vcpu); + return; + } +#endif + if (unlikely(!timer->enabled)) return;
@@ -1766,6 +1850,15 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) return -EINVAL; }
+#ifdef CONFIG_HISI_VIRTCCA_HOST + /* + * We don't use mapped IRQs for CVM because the TMI doesn't allow + * us setting the LR.HW bit in the VGIC. + */ + if (vcpu_is_tec(vcpu)) + return 0; +#endif + get_timer_map(vcpu, &map);
#ifdef CONFIG_VIRT_VTIMER_IRQ_BYPASS diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 333c65ced8..fe3add87ef 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -40,6 +40,8 @@ #include <asm/kvm_pkvm.h> #include <asm/kvm_emulate.h> #include <asm/sections.h> +#include <asm/kvm_tmi.h> +#include <linux/perf/arm_pmu.h>
#include <kvm/arm_hypercalls.h> #include <kvm/arm_pmu.h> @@ -132,6 +134,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, } mutex_unlock(&kvm->slots_lock); break; +#ifdef CONFIG_HISI_VIRTCCA_HOST + case KVM_CAP_ARM_TMM: + if (static_branch_unlikely(&virtcca_cvm_is_available)) + r = kvm_cvm_enable_cap(kvm, cap); + break; +#endif default: r = -EINVAL; break; @@ -153,6 +161,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (kvm_arm_cvm_type(type)) { + ret = kvm_enable_virtcca_cvm(kvm); + if (ret) + return ret; + } +#endif + ret = kvm_sched_affinity_vm_init(kvm); if (ret) return ret; @@ -199,8 +215,20 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (kvm_arm_cvm_type(type)) { + ret = kvm_init_cvm_vm(kvm); + if (ret) + goto out_free_stage2_pgd; + } +#endif + return 0;
+#ifdef CONFIG_HISI_VIRTCCA_HOST +out_free_stage2_pgd: + kvm_free_stage2_pgd(&kvm->arch.mmu); +#endif err_free_cpumask: free_cpumask_var(kvm->arch.supported_cpus); err_unshare_kvm: @@ -235,6 +263,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_unshare_hyp(kvm, kvm + 1);
kvm_arm_teardown_hypercalls(kvm); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (kvm_is_virtcca_cvm(kvm)) + kvm_destroy_cvm(kvm); +#endif }
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) @@ -306,7 +338,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = system_supports_mte(); break; case KVM_CAP_STEAL_TIME: - r = kvm_arm_pvtime_supported(); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (kvm && kvm_is_virtcca_cvm(kvm)) + r = 0; + else +#endif + r = kvm_arm_pvtime_supported(); break; case KVM_CAP_ARM_EL1_32BIT: r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1); @@ -346,6 +383,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ARM_VIRT_MSI_BYPASS: r = sdev_enable; break; +#endif +#ifdef CONFIG_HISI_VIRTCCA_HOST + case KVM_CAP_ARM_TMM: + if (!is_armv8_4_sel2_present()) { + r = -ENXIO; + break; + } + r = static_key_enabled(&virtcca_cvm_is_available); + break; #endif default: r = 0; @@ -499,8 +545,23 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + if (single_task_running()) + vcpu_clear_wfx_traps(vcpu); + else + vcpu_set_wfx_traps(vcpu); + } +#endif kvm_vgic_load(vcpu); kvm_timer_vcpu_load(vcpu); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) + kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); + return; + } +#endif if (has_vhe()) kvm_vcpu_load_sysregs_vhe(vcpu); kvm_arch_vcpu_load_fp(vcpu); @@ -531,6 +592,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + kvm_cvm_vcpu_put(vcpu); + return; + } +#endif kvm_arch_vcpu_put_debug_state_flags(vcpu); kvm_arch_vcpu_put_fp(vcpu); if (has_vhe()) @@ -705,6 +772,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) * Tell the rest of the code that there are userspace irqchip * VMs in the wild. */ +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (!kvm_is_virtcca_cvm(kvm)) +#endif static_branch_inc(&userspace_irqchip_in_use); }
@@ -959,6 +1029,18 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret) xfer_to_guest_mode_work_pending(); }
+#ifdef CONFIG_HISI_VIRTCCA_HOST +static inline void update_pmu_phys_irq(struct kvm_vcpu *vcpu, bool *pmu_stopped) +{ + struct kvm_pmu *pmu = &vcpu->arch.pmu; + + if (pmu->irq_level) { + *pmu_stopped = true; + arm_pmu_set_phys_irq(false); + } +} +#endif + /* * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while * the vCPU is running. @@ -1011,6 +1093,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) run->exit_reason = KVM_EXIT_UNKNOWN; run->flags = 0; while (ret > 0) { +#ifdef CONFIG_HISI_VIRTCCA_HOST + bool pmu_stopped = false; +#endif /* * Check conditions before entering the guest */ @@ -1038,6 +1123,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
kvm_pmu_flush_hwstate(vcpu); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + update_pmu_phys_irq(vcpu, &pmu_stopped); +#endif
local_irq_disable();
@@ -1076,7 +1165,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) trace_kvm_entry(*vcpu_pc(vcpu)); guest_timing_enter_irqoff();
- ret = kvm_arm_vcpu_enter_exit(vcpu); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + ret = kvm_tec_enter(vcpu); + else +#endif + ret = kvm_arm_vcpu_enter_exit(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->stat.exits++; @@ -1130,13 +1224,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
local_irq_enable();
- trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (!vcpu_is_tec(vcpu)) { +#endif + trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
- /* Exit types that need handling before we can be preempted */ - handle_exit_early(vcpu, ret); + /* Exit types that need handling before we can be preempted */ + handle_exit_early(vcpu, ret);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + } +#endif preempt_enable();
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (pmu_stopped) + arm_pmu_set_phys_irq(true); +#endif /* * The ARMv8 architecture doesn't give the hypervisor * a mechanism to prevent a guest from dropping to AArch32 EL0 @@ -1156,7 +1260,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ret = ARM_EXCEPTION_IL; }
- ret = handle_exit(vcpu, ret); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + ret = handle_cvm_exit(vcpu, ret); + else +#endif + ret = handle_exit(vcpu, ret); #ifdef CONFIG_ARCH_VCPU_STAT update_vcpu_stat_time(&vcpu->stat); #endif @@ -1669,6 +1778,11 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) struct kvm_device_attr attr;
switch (ioctl) { +#ifdef CONFIG_HISI_VIRTCCA_HOST + case KVM_LOAD_USER_DATA: { + return kvm_load_user_data(kvm, arg); + } +#endif case KVM_CREATE_IRQCHIP: { int ret; if (!vgic_present) @@ -2555,6 +2669,14 @@ static __init int kvm_arm_init(void)
in_hyp_mode = is_kernel_in_hyp_mode();
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (static_branch_unlikely(&virtcca_cvm_is_enable) && in_hyp_mode) { + err = kvm_init_tmm(); + if (err) + return err; + } +#endif + if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || cpus_have_final_cap(ARM64_WORKAROUND_1508412)) kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 3ebbb81b15..97c80f85fd 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -26,6 +26,7 @@ #include <asm/kvm_emulate.h> #include <asm/kvm_nested.h> #include <asm/sigcontext.h> +#include <asm/kvm_tmi.h>
#include "trace.h"
@@ -874,6 +875,10 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, bool has_esr = events->exception.serror_has_esr; bool ext_dabt_pending = events->exception.ext_dabt_pending;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + return kvm_cvm_vcpu_set_events(vcpu, serror_pending, ext_dabt_pending); +#endif if (serror_pending && has_esr) { if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) return -EINVAL; diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c index 3dd38a151d..40d6d056e2 100644 --- a/arch/arm64/kvm/mmio.c +++ b/arch/arm64/kvm/mmio.c @@ -6,6 +6,7 @@
#include <linux/kvm_host.h> #include <asm/kvm_emulate.h> +#include <asm/kvm_tmi.h> #include <trace/events/kvm.h>
#include "trace.h" @@ -109,6 +110,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu) &data); data = vcpu_data_host_to_guest(vcpu, data, len); vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)-> + tec_entry.gprs[0] = data; + } +#endif }
/* @@ -178,7 +185,12 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) run->mmio.phys_addr = fault_ipa; run->mmio.len = len; vcpu->mmio_needed = 1; - +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags |= + TEC_ENTRY_FLAG_EMUL_MMIO; + } +#endif if (!ret) { /* We handled the access successfully in the kernel. */ if (!is_write) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 121a3d9024..f3ab2c39f7 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -871,7 +871,11 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t u64 mmfr0, mmfr1; u32 phys_shift;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if ((type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) && (!kvm_is_virtcca_cvm(kvm))) +#else if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) +#endif return -EINVAL;
phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); @@ -1414,6 +1418,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level); write_fault = kvm_is_write_fault(vcpu); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + write_fault = true; + prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W; + } +#endif exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); VM_BUG_ON(write_fault && exec_fault); vcpu->stat.mabt_exit_stat++; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 6b066e04dc..cdc023f845 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -15,6 +15,7 @@ #include <kvm/arm_pmu.h> #include <kvm/arm_vgic.h> #include <asm/arm_pmuv3.h> +#include <asm/kvm_tmi.h>
#define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0)
@@ -324,6 +325,14 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) { u64 reg = 0;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + struct tmi_tec_run *run = vcpu->arch.tec.tec_run; + + reg = run->tec_exit.pmu_ovf_status; + return reg; + } +#endif if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) { reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 1f69b66733..b544418b68 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -12,6 +12,7 @@
#include <asm/cputype.h> #include <asm/kvm_emulate.h> +#include <asm/kvm_tmi.h>
#include <kvm/arm_psci.h> #include <kvm/arm_hypercalls.h> @@ -79,6 +80,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) return PSCI_RET_INVALID_PARAMS;
spin_lock(&vcpu->arch.mp_state_lock); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + cvm_psci_complete(source_vcpu, vcpu); +#endif if (!kvm_arm_vcpu_stopped(vcpu)) { if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) ret = PSCI_RET_ALREADY_ON; @@ -141,7 +146,10 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
/* Ignore other bits of target affinity */ target_affinity &= target_affinity_mask; - +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + return cvm_psci_vcpu_affinity_info(vcpu, target_affinity, lowest_affinity_level); +#endif /* * If one or more VCPU matching target affinity are running * then ON else OFF diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 7a65a35ee4..d38e74db97 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -29,6 +29,7 @@ #include <asm/kvm_mmu.h> #include <asm/kvm_nested.h> #include <asm/virt.h> +#include <asm/kvm_tmi.h>
/* Maximum phys_shift supported for any VM on this host */ static u32 __ro_after_init kvm_ipa_limit; @@ -139,6 +140,12 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) return -EPERM;
return kvm_vcpu_finalize_sve(vcpu); +#ifdef CONFIG_HISI_VIRTCCA_HOST + case KVM_ARM_VCPU_TEC: + if (!kvm_is_virtcca_cvm(vcpu->kvm)) + return -EINVAL; + return kvm_finalize_vcpu_tec(vcpu); +#endif }
return -EINVAL; @@ -162,6 +169,10 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu)); kfree(sve_state); kfree(vcpu->arch.ccsidr); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + kvm_destroy_tec(vcpu); +#endif }
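For context, the KVM_ARM_VCPU_TEC case added to kvm_arm_vcpu_finalize() above follows the same userspace flow as SVE finalization. A rough sketch of how a VMM would drive it; the ioctls are standard KVM UAPI and the feature bit comes from the uapi/asm/kvm.h hunk earlier in this patch:

/* Sketch: create a vCPU for a cVM and finalize its TEC state. */
struct kvm_vcpu_init init = { 0 };
int feature = KVM_ARM_VCPU_TEC;		/* bit 8, from uapi/asm/kvm.h above */

ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
init.features[KVM_ARM_VCPU_TEC / 32] |= 1U << (KVM_ARM_VCPU_TEC % 32);

if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
	perror("KVM_ARM_VCPU_INIT");
if (ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature) < 0)
	perror("KVM_ARM_VCPU_FINALIZE(TEC)");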
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 69ca111e34..dab599e857 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -10,6 +10,7 @@ #include <asm/kvm_hyp.h> #include <asm/kvm_mmu.h> #include <asm/kvm_asm.h> +#include <asm/kvm_tmi.h>
#include "vgic.h"
@@ -680,7 +681,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info) (unsigned long long)info->vcpu.start); } else if (kvm_get_mode() != KVM_MODE_PROTECTED) { kvm_vgic_global_state.vcpu_base = info->vcpu.start; - kvm_vgic_global_state.can_emulate_gicv2 = true; +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (!static_branch_unlikely(&virtcca_cvm_is_available)) +#endif + kvm_vgic_global_state.can_emulate_gicv2 = true; ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2); if (ret) { kvm_err("Cannot register GICv2 KVM device.\n"); @@ -760,7 +764,13 @@ void vgic_v3_load(struct kvm_vcpu *vcpu) void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; - +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + cpu_if->vgic_vmcr = + ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_exit.gicv3_vmcr; + return; + } +#endif if (likely(cpu_if->vgic_sre)) cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr); } diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index 2459b0adea..ec110006ac 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -11,6 +11,7 @@ #include <linux/nospec.h>
#include <asm/kvm_hyp.h> +#include <asm/kvm_tmi.h>
#include "vgic.h"
@@ -897,12 +898,44 @@ static inline bool can_access_vgic_from_kernel(void) return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe(); }
+#ifdef CONFIG_HISI_VIRTCCA_HOST +static inline void vgic_tmm_save_state(struct kvm_vcpu *vcpu) +{ + int i; + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run; + + for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) { + cpu_if->vgic_lr[i] = tec_run->tec_exit.gicv3_lrs[i]; + tec_run->tec_entry.gicv3_lrs[i] = 0; + } +} + +static inline void vgic_tmm_restore_state(struct kvm_vcpu *vcpu) +{ + int i; + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run; + + for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) { + tec_run->tec_entry.gicv3_lrs[i] = cpu_if->vgic_lr[i]; + tec_run->tec_exit.gicv3_lrs[i] = cpu_if->vgic_lr[i]; + } +} +#endif + static inline void vgic_save_state(struct kvm_vcpu *vcpu) { if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_save_state(vcpu); else - __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + vgic_tmm_save_state(vcpu); + else +#endif + __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); + }
/* Sync back the hardware VGIC state into our emulation after a guest's run. */ @@ -932,7 +965,12 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu) if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_restore_state(vcpu); else - __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + vgic_tmm_restore_state(vcpu); + else +#endif + __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); }
/* Flush our emulation state into the GIC hardware before entering the guest. */ @@ -973,7 +1011,10 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu) { if (unlikely(!vgic_initialized(vcpu->kvm))) return; - +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + return; +#endif if (kvm_vgic_global_state.type == VGIC_V2) vgic_v2_load(vcpu); else @@ -984,7 +1025,10 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu) { if (unlikely(!vgic_initialized(vcpu->kvm))) return; - +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + return; +#endif if (kvm_vgic_global_state.type == VGIC_V2) vgic_v2_put(vcpu); else diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 8c8d7653ba..66a7fff9f3 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -45,6 +45,7 @@ #include <asm/tlb.h> #include <asm/alternative.h> #include <asm/xen/swiotlb-xen.h> +#include <asm/set_memory.h>
#include "internal.h"
@@ -610,6 +611,8 @@ void __init mem_init(void)
swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
+ swiotlb_cvm_update_mem_attributes(); + /* this will put all unused low memory onto the freelists */ memblock_free_all();
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 4142a75a41..31f04f19b6 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -40,6 +40,7 @@ #include <asm/tlbflush.h> #include <asm/pgalloc.h> #include <asm/kfence.h> +#include <asm/set_memory.h>
#define NO_BLOCK_MAPPINGS BIT(0) #define NO_CONT_MAPPINGS BIT(1) @@ -589,7 +590,7 @@ static void __init map_mem(pgd_t *pgdp)
early_kfence_pool = arm64_kfence_alloc_pool();
- if (can_set_direct_map()) + if (can_set_direct_map() || is_virtcca_cvm_world()) flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/* @@ -1343,7 +1344,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
VM_BUG_ON(!mhp_range_allowed(start, size, true));
- if (can_set_direct_map()) + if (can_set_direct_map() || is_virtcca_cvm_world()) flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 0e270a1c51..06e81d1dbc 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -195,6 +195,9 @@ int set_direct_map_default_noflush(struct page *page) #ifdef CONFIG_DEBUG_PAGEALLOC void __kernel_map_pages(struct page *page, int numpages, int enable) { + if (is_virtcca_cvm_world()) + return; + if (!can_set_direct_map()) return;
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile index 54feb1eccc..e4020cafcc 100644 --- a/drivers/iommu/arm/arm-smmu-v3/Makefile +++ b/drivers/iommu/arm/arm-smmu-v3/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o arm_smmu_v3-objs-y += arm-smmu-v3.o +arm_smmu_v3-objs-$(CONFIG_HISI_VIRTCCA_HOST) += arm-s-smmu-v3.o arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o arm_smmu_v3-objs := $(arm_smmu_v3-objs-y) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 16217647fd..157538d22f 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -30,6 +30,11 @@ #include "arm-smmu-v3.h" #include "../../dma-iommu.h"
+#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <asm/kvm_tmi.h> +#include "arm-s-smmu-v3.h" +#endif + static bool disable_bypass = true; module_param(disable_bypass, bool, 0444); MODULE_PARM_DESC(disable_bypass, @@ -102,6 +107,10 @@ static int arm_smmu_bypass_dev_domain_type(struct device *dev) enum arm_smmu_msi_index { EVTQ_MSI_INDEX, GERROR_MSI_INDEX, +#ifdef CONFIG_HISI_VIRTCCA_HOST + S_EVTQ_MSI_INDEX, + S_GERROR_MSI_INDEX, +#endif PRIQ_MSI_INDEX, ARM_SMMU_MAX_MSIS, }; @@ -117,6 +126,18 @@ static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = { ARM_SMMU_GERROR_IRQ_CFG1, ARM_SMMU_GERROR_IRQ_CFG2, }, +#ifdef CONFIG_HISI_VIRTCCA_HOST + [S_EVTQ_MSI_INDEX] = { + ARM_SMMU_S_EVTQ_IRQ_CFG0, + ARM_SMMU_S_EVTQ_IRQ_CFG1, + ARM_SMMU_S_EVTQ_IRQ_CFG2, + }, + [S_GERROR_MSI_INDEX] = { + ARM_SMMU_S_GERROR_IRQ_CFG0, + ARM_SMMU_S_GERROR_IRQ_CFG1, + ARM_SMMU_S_GERROR_IRQ_CFG2, + }, +#endif [PRIQ_MSI_INDEX] = { ARM_SMMU_PRIQ_IRQ_CFG0, ARM_SMMU_PRIQ_IRQ_CFG1, @@ -995,11 +1016,16 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, * Dependency ordering from the cmpxchg() loop above. */ arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); +#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_s_smmu_cmdq_write_entries(smmu, cmds, n); +#endif if (sync) { prod = queue_inc_prod_n(&llq, n); arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod); queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); - + #ifdef CONFIG_HISI_VIRTCCA_HOST + s_queue_write(smmu, cmd_sync, CMDQ_ENT_DWORDS); + #endif /* * In order to determine completion of our CMD_SYNC, we must * ensure that the queue can't wrap twice without us noticing. @@ -2368,6 +2394,12 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (smmu_domain->secure) { + smmu_domain->stage = ARM_SMMU_DOMAIN_S2; + } +#endif + switch (smmu_domain->stage) { case ARM_SMMU_DOMAIN_S1: ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48; @@ -2588,6 +2620,52 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master) arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL); }
+#ifdef CONFIG_HISI_VIRTCCA_HOST +static void arm_smmu_secure_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid) +{ + struct arm_smmu_cmdq_ent prefetch_cmd = { + .opcode = CMDQ_OP_PREFETCH_CFG, + .prefetch = { + .sid = sid, + }, + }; + arm_smmu_sync_ste_for_sid(smmu, sid); + + /* It's likely that we'll want to use the new STE soon */ + if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) + arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); +} + +static int arm_smmu_secure_dev_operator(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_device *smmu, struct arm_smmu_master *master, struct device *dev) +{ + int i, j; + int ret; + + ret = arm_smmu_secure_set_dev(smmu_domain, master, dev); + if (ret) + return ret; + + for (i = 0; i < master->num_streams; ++i) { + u32 sid = master->streams[i].id; + /* Bridged PCI devices may end up with duplicated IDs */ + for (j = 0; j < i; j++) + if (master->streams[j].id == sid) + break; + if (j < i) + continue; + if (arm_smmu_secure_dev_ste_create(smmu, master, sid)) + return -ENOMEM; + + arm_smmu_secure_sync_ste_for_sid(smmu, sid); + } + + dev_info(smmu->dev, "attach confidential dev: %s", dev_name(dev)); + + return ret; +} +#endif + static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { int ret = 0; @@ -2669,6 +2747,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_install_ste_for_dev(master);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (smmu_domain->secure) + ret = arm_smmu_secure_dev_operator(smmu_domain, smmu, master, dev); +#endif + arm_smmu_enable_ats(master); return 0;
@@ -3300,6 +3383,9 @@ static struct iommu_ops arm_smmu_ops = { .def_domain_type = arm_smmu_def_domain_type, .pgsize_bitmap = -1UL, /* Restricted during device attach */ .owner = THIS_MODULE, + #ifdef CONFIG_HISI_VIRTCCA_HOST + .iommu_enable_secure = arm_smmu_enable_secure, + #endif .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = arm_smmu_attach_dev, .map_pages = arm_smmu_map_pages, @@ -3362,6 +3448,13 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
q->llq.prod = q->llq.cons = 0; + +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (arm_s_smmu_init_one_queue(smmu, q, qsz, name)) { + return -ENOMEM; + }; +#endif + return 0; }
@@ -3665,6 +3758,15 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) desc->msg.data = msg->data; #endif
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if ((desc->msi_index == S_EVTQ_MSI_INDEX || + desc->msi_index == S_GERROR_MSI_INDEX) && + smmu->id != ARM_SMMU_INVALID_ID) { + arm_smmu_write_s_msi_msg(smmu, cfg, msg, doorbell); + return; + } +#endif + writeq_relaxed(doorbell, smmu->base + cfg[0]); writel_relaxed(msg->data, smmu->base + cfg[1]); writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]); @@ -3679,6 +3781,15 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0); writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (smmu->id != ARM_SMMU_INVALID_ID) { + if (tmi_smmu_write(smmu->ioaddr, ARM_SMMU_S_GERROR_IRQ_CFG0, 0, 64)) + return; + if (tmi_smmu_write(smmu->ioaddr, ARM_SMMU_S_EVTQ_IRQ_CFG0, 0, 64)) + return; + } +#endif + if (smmu->features & ARM_SMMU_FEAT_PRI) writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0); else @@ -3703,6 +3814,13 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX); smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (smmu->id != ARM_SMMU_INVALID_ID) { + smmu->s_evtq_irq = msi_get_virq(dev, S_EVTQ_MSI_INDEX); + smmu->s_gerr_irq = msi_get_virq(dev, S_GERROR_MSI_INDEX); + } +#endif + /* Add callback to free MSIs on teardown */ devm_add_action(dev, arm_smmu_free_msis, dev); } @@ -3791,6 +3909,11 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu, bool resume dev_warn(smmu->dev, "no priq irq - PRI will be broken\n"); } } + +#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_s_smmu_setup_unique_irqs(smmu); +#endif + }
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu, bool resume) @@ -3806,6 +3929,12 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu, bool resume) return ret; }
+#ifdef CONFIG_HISI_VIRTCCA_HOST + ret = arm_smmu_disable_s_irq(smmu); + if (ret) + return ret; +#endif + irq = smmu->combined_irq; if (irq) { /* @@ -3831,6 +3960,12 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu, bool resume) if (ret) dev_warn(smmu->dev, "failed to enable irqs\n");
+#ifdef CONFIG_HISI_VIRTCCA_HOST + ret = arm_smmu_enable_s_irq(smmu, IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN); + if (ret) + return ret; +#endif + return 0; }
@@ -3842,6 +3977,10 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu) if (ret) dev_err(smmu->dev, "failed to clear cr0\n");
+#ifdef CONFIG_HISI_VIRTCCA_HOST + ret = arm_s_smmu_device_disable(smmu); +#endif + return ret; }
@@ -3935,6 +4074,12 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume) FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB); writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + ret = arm_s_smmu_device_reset(smmu); + if (ret) + return ret; +#endif + /* CR2 (random crap) */ reg = CR2_PTM | CR2_RECINVSID;
@@ -4027,6 +4172,14 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume) if (is_kdump_kernel()) enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + ret = arm_s_smmu_device_enable(smmu, enables, smmu->bypass, disable_bypass); + if (ret) { + dev_err(smmu->dev, "failed to enable secure SMMU interface\n"); + return ret; + } +#endif + /* Enable the SMMU interface, or ensure bypass */ if (!smmu->bypass || disable_bypass) { enables |= CR0_SMMUEN; @@ -4358,6 +4511,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) smmu->features |= ARM_SMMU_FEAT_ECMDQ; #endif
+#ifndef CONFIG_HISI_VIRTCCA_HOST + if (reg & IDR1_ECMDQ) + smmu->features |= ARM_SMMU_FEAT_ECMDQ; +#endif + /* Queue sizes, capped to ensure natural alignment */ smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, reg)); @@ -4390,6 +4548,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) if (smmu->sid_bits <= STRTAB_SPLIT) smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (arm_smmu_s_idr1_support_secure(smmu)) + return -ENXIO; +#endif + /* IDR3 */ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3); switch (FIELD_GET(IDR3_BBML, reg)) { @@ -4748,6 +4911,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev) } ioaddr = res->start;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_smmu_map_init(smmu, ioaddr); +#endif + /* * Don't map the IMPLEMENTATION DEFINED regions, since they may contain * the PMCG registers which are reserved by the PMU driver. @@ -4782,6 +4949,11 @@ static int arm_smmu_device_probe(struct platform_device *pdev) irq = platform_get_irq_byname_optional(pdev, "gerror"); if (irq > 0) smmu->gerr_irq = irq; + +#ifdef CONFIG_HISI_VIRTCCA_HOST + platform_get_s_irq_byname_optional(pdev, smmu); +#endif + } /* Probe the h/w */ ret = arm_smmu_device_hw_probe(smmu); @@ -4829,6 +5001,10 @@ static void arm_smmu_device_remove(struct platform_device *pdev) arm_smmu_device_disable(smmu); iopf_queue_free(smmu->evtq.iopf); ida_destroy(&smmu->vmid_map); +#ifdef CONFIG_HISI_VIRTCCA_HOST + free_g_cc_dev_htable(); + arm_smmu_id_free(smmu->id); +#endif }
static void arm_smmu_device_shutdown(struct platform_device *pdev) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 859d217298..43121c17a2 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -13,6 +13,9 @@ #include <linux/kernel.h> #include <linux/mmzone.h> #include <linux/sizes.h> +#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <linux/kvm_host.h> +#endif
/* MMIO registers */ #define ARM_SMMU_IDR0 0x0 @@ -750,6 +753,13 @@ struct arm_smmu_device { struct mutex streams_mutex;
bool bypass; + +#ifdef CONFIG_HISI_VIRTCCA_HOST + int s_evtq_irq; + int s_gerr_irq; + resource_size_t ioaddr; + uint64_t id; +#endif };
struct arm_smmu_stream { @@ -802,6 +812,12 @@ struct arm_smmu_domain { spinlock_t devices_lock;
struct list_head mmu_notifiers; + +#ifdef CONFIG_HISI_VIRTCCA_HOST + bool secure; + struct list_head node; + struct kvm *kvm; +#endif };
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 0fa706a478..05700ae898 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -29,6 +29,10 @@ #include <linux/percpu.h> #include <linux/slab.h> #include <linux/syscore_ops.h> +#ifdef CONFIG_HISI_VIRTCCA_GUEST +#include <linux/swiotlb.h> +#include <asm/virtcca_cvm_guest.h> +#endif
#include <linux/irqchip.h> #include <linux/irqchip/arm-gic-v3.h> @@ -399,6 +403,93 @@ static int build_devid_pools(void) } #endif
+#ifdef CONFIG_HISI_VIRTCCA_GUEST + +static struct device cvm_alloc_device; +static LIST_HEAD(cvm_its_nodes); +static raw_spinlock_t cvm_its_lock; + +struct its_device_order { + struct its_device *dev; + struct list_head entry; + int itt_order; +}; + +static inline struct page *its_alloc_shared_pages_node(int node, gfp_t gfp, + unsigned int order) +{ + return swiotlb_alloc(&cvm_alloc_device, (1 << order) * PAGE_SIZE); +} + +static inline struct page *its_alloc_shared_pages(gfp_t gfp, unsigned int order) +{ + return its_alloc_shared_pages_node(NUMA_NO_NODE, gfp, order); +} + +static void its_free_shared_pages(void *addr, int order) +{ + if (order < 0) + return; + + swiotlb_free(&cvm_alloc_device, (struct page *)addr, (1 << order) * PAGE_SIZE); +} + +static int add_its_device_order(struct its_device *dev, int itt_order) +{ + struct its_device_order *new; + unsigned long flags; + + new = kmalloc(sizeof(struct its_device_order), GFP_KERNEL); + if (!new) + return -ENOMEM; + new->dev = dev; + new->itt_order = itt_order; + raw_spin_lock_irqsave(&cvm_its_lock, flags); + list_add_tail(&new->entry, &cvm_its_nodes); + raw_spin_unlock_irqrestore(&cvm_its_lock, flags); + return 0; +} + +/* get its device order and then free its device order */ +static int get_its_device_order(struct its_device *dev) +{ + struct its_device_order *pos, *tmp; + unsigned long flags; + int itt_order = -1; + + raw_spin_lock_irqsave(&cvm_its_lock, flags); + list_for_each_entry_safe(pos, tmp, &cvm_its_nodes, entry) { + if (pos->dev == dev) { + itt_order = pos->itt_order; + list_del(&pos->entry); + kfree(pos); + goto found; + } + } +found: + raw_spin_unlock_irqrestore(&cvm_its_lock, flags); + return itt_order; +} + +static void *its_alloc_shared_page_address(struct its_device *dev, + struct its_node *its, int sz) +{ + struct page *page; + int itt_order; + + itt_order = get_order(sz); + if (add_its_device_order(dev, itt_order)) + return NULL; + + page = its_alloc_shared_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + itt_order); + if (!page) + return NULL; + return (void *)page_address(page); +} + +#endif + /* * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we * always have vSGIs mapped. @@ -2441,7 +2532,13 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags) { struct page *prop_page;
- prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + prop_page = its_alloc_shared_pages(gfp_flags, + get_order(LPI_PROPBASE_SZ)); + else +#endif + prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); if (!prop_page) return NULL;
@@ -2452,8 +2549,14 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
static void its_free_prop_table(struct page *prop_page) { - free_pages((unsigned long)page_address(prop_page), - get_order(LPI_PROPBASE_SZ)); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + its_free_shared_pages(page_address(prop_page), + get_order(LPI_PROPBASE_SZ)); + else +#endif + free_pages((unsigned long)page_address(prop_page), + get_order(LPI_PROPBASE_SZ)); }
static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) @@ -2575,7 +2678,13 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, order = get_order(GITS_BASER_PAGES_MAX * psz); }
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + page = its_alloc_shared_pages_node(its->numa_node, + GFP_KERNEL | __GFP_ZERO, order); + else +#endif + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); if (!page) return -ENOMEM;
@@ -2588,7 +2697,12 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, /* 52bit PA is supported only when PageSize=64K */ if (psz != SZ_64K) { pr_err("ITS: no 52bit PA support when psz=%d\n", psz); - free_pages((unsigned long)base, order); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + its_free_shared_pages(base, order); + else +#endif + free_pages((unsigned long)base, order); return -ENXIO; }
@@ -2644,7 +2758,12 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", &its->phys_base, its_base_type_string[type], val, tmp); - free_pages((unsigned long)base, order); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + its_free_shared_pages(base, order); + else +#endif + free_pages((unsigned long)base, order); return -ENXIO; }
@@ -2783,8 +2902,14 @@ static void its_free_tables(struct its_node *its)
for (i = 0; i < GITS_BASER_NR_REGS; i++) { if (its->tables[i].base) { - free_pages((unsigned long)its->tables[i].base, - its->tables[i].order); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + its_free_shared_pages(its->tables[i].base, + its->tables[i].order); + else +#endif + free_pages((unsigned long)its->tables[i].base, + its->tables[i].order); its->tables[i].base = NULL; } } @@ -3054,7 +3179,13 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
/* Allocate memory for 2nd level table */ if (!table[idx]) { - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + page = its_alloc_shared_pages(GFP_KERNEL | __GFP_ZERO, + get_order(psz)); + else +#endif + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); if (!page) return false;
@@ -3173,7 +3304,13 @@ static int allocate_vpe_l1_table(void)
pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", np, npg, psz, epp, esz); - page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + page = its_alloc_shared_pages(GFP_ATOMIC | __GFP_ZERO, + get_order(np * PAGE_SIZE)); + else +#endif + page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); if (!page) return -ENOMEM;
@@ -3219,8 +3356,14 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags) { struct page *pend_page;
- pend_page = alloc_pages(gfp_flags | __GFP_ZERO, - get_order(LPI_PENDBASE_SZ)); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + pend_page = its_alloc_shared_pages(gfp_flags | __GFP_ZERO, + get_order(LPI_PENDBASE_SZ)); + else +#endif + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, + get_order(LPI_PENDBASE_SZ)); if (!pend_page) return NULL;
@@ -3232,7 +3375,13 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
static void its_free_pending_table(struct page *pt) { - free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + its_free_shared_pages(page_address(pt), + get_order(LPI_PENDBASE_SZ)); + else +#endif + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); }
/* @@ -3567,8 +3716,15 @@ static bool its_alloc_table_entry(struct its_node *its,
/* Allocate memory for 2nd level table */ if (!table[idx]) { - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, - get_order(baser->psz)); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + page = its_alloc_shared_pages_node(its->numa_node, + GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); + else +#endif + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); if (!page) return false;
@@ -3671,7 +3827,12 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, nr_ites = max(2, nvecs); sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; - itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + itt = its_alloc_shared_page_address(dev, its, sz); + else +#endif + itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); if (alloc_lpis) { lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); if (lpi_map) @@ -3685,7 +3846,12 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { kfree(dev); - kfree(itt); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + its_free_shared_pages(itt, get_order(sz)); + else +#endif + kfree(itt); bitmap_free(lpi_map); kfree(col_map); return NULL; @@ -3722,7 +3888,12 @@ static void its_free_device(struct its_device *its_dev) list_del(&its_dev->entry); raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); kfree(its_dev->event_map.col_map); - kfree(its_dev->itt); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + its_free_shared_pages(its_dev->itt, get_its_device_order(its_dev)); + else +#endif + kfree(its_dev->itt);
#ifdef CONFIG_VIRT_PLAT_DEV if (its_dev->is_vdev) { @@ -5465,8 +5636,15 @@ static int __init its_probe_one(struct its_node *its) } }
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, - get_order(ITS_CMD_QUEUE_SZ)); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + page = its_alloc_shared_pages_node(its->numa_node, + GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); + else +#endif + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); if (!page) { err = -ENOMEM; goto out_unmap_sgir; @@ -5530,7 +5708,12 @@ static int __init its_probe_one(struct its_node *its) out_free_tables: its_free_tables(its); out_free_cmd: - free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) + its_free_shared_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); + else +#endif + free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); out_unmap_sgir: if (its->sgir_base) iounmap(its->sgir_base); @@ -6019,6 +6202,12 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, #endif int err;
+#ifdef CONFIG_HISI_VIRTCCA_GUEST + if (is_virtcca_cvm_world()) { + device_initialize(&cvm_alloc_device); + raw_spin_lock_init(&cvm_its_lock); + } +#endif gic_rdists = rdists;
its_parent = parent_domain; diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index d712a19e47..3f6a998667 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -736,6 +736,23 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) return 0; }
+#ifdef CONFIG_HISI_VIRTCCA_HOST +void arm_pmu_set_phys_irq(bool enable) +{ + int cpu = get_cpu(); + struct arm_pmu *pmu = per_cpu(cpu_armpmu, cpu); + int irq; + + irq = armpmu_get_cpu_irq(pmu, cpu); + if (irq && !enable) + per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq); + else if (irq && enable) + per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq); + + put_cpu(); +} +#endif + #ifdef CONFIG_CPU_PM static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd) { diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index fee20f66da..9d3f034bd8 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -172,4 +172,8 @@ static inline bool has_cntpoff(void) return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF)); }
+#ifdef CONFIG_HISI_VIRTCCA_HOST +/* Needed for S-EL2 */ +void kvm_cvm_timers_update(struct kvm_vcpu *vcpu); +#endif #endif diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 1d70dd0d0c..0f3a11c2c4 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -635,7 +635,11 @@ struct iommu_ops { struct iommu_domain *blocked_domain; struct iommu_domain *default_domain;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + KABI_USE(1, int (*iommu_enable_secure)(struct iommu_domain *domain)) +#else KABI_RESERVE(1) +#endif KABI_RESERVE(2) KABI_RESERVE(3) KABI_RESERVE(4) diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 19a7b00baf..865938aebf 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -113,6 +113,44 @@ (cond) ? 0 : -ETIMEDOUT; \ })
+#ifdef CONFIG_HISI_VIRTCCA_HOST +#define kvm_cvm_read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \ + delay_before_read, args...) \ +({ \ + u64 __timeout_us = (timeout_us); \ + u64 rv = 0; \ + int result = 0; \ + unsigned long __delay_us = (delay_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + if (delay_before_read && __delay_us) \ + udelay(__delay_us); \ + for (;;) { \ + rv = op(args); \ + if (rv >> 32) { \ + result = -ENXIO; \ + break; \ + } \ + (val) = (u32)rv; \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + rv = op(args); \ + if (rv >> 32) { \ + result = -ENXIO; \ + break; \ + } \ + (val) = (u32)rv; \ + break; \ + } \ + if (__delay_us) \ + udelay(__delay_us); \ + cpu_relax(); \ + } \ + result ? result : ((cond) ? 0 : -ETIMEDOUT); \ +}) +#endif + /** * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs * @op: accessor function (takes @addr as its only argument) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 4557f47353..000ae1ba06 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -543,6 +543,28 @@ static __always_inline void guest_state_exit_irqoff(void) instrumentation_end(); }
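The kvm_cvm_read_poll_timeout_atomic() helper above folds the TMI return convention (error code in the upper 32 bits, data in the lower 32 bits) into the usual readx_poll_timeout_atomic() pattern. A hedged usage sketch; tmi_smmu_read() is an assumption modeled on the tmi_smmu_write() calls elsewhere in this patch, while CR0_SMMUEN, ARM_SMMU_CR0ACK and ARM_SMMU_POLL_TIMEOUT_US are existing SMMUv3 driver definitions:

/* Sketch: poll a secure SMMU register through a TMI accessor until a bit sets. */
u32 reg;
int ret;

ret = kvm_cvm_read_poll_timeout_atomic(tmi_smmu_read, reg,
				       reg & CR0_SMMUEN,	/* assumed completion bit */
				       1, ARM_SMMU_POLL_TIMEOUT_US,
				       false,			/* no delay before first read */
				       smmu->ioaddr, ARM_SMMU_CR0ACK, 32);
if (ret)
	dev_err(smmu->dev, "secure SMMU enable ack failed or timed out: %d\n", ret);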
+#ifdef CONFIG_HISI_VIRTCCA_HOST + +#define KVM_TYPE_CVM_BIT 8 +#define CVM_MAX_HALT_POLL_NS 100000 + +DECLARE_STATIC_KEY_FALSE(virtcca_cvm_is_available); + +static __always_inline bool vcpu_is_tec(struct kvm_vcpu *vcpu) +{ + if (static_branch_unlikely(&virtcca_cvm_is_available)) + return vcpu->arch.tec.tec_run; + + return false; +} + +static inline bool kvm_arm_cvm_type(unsigned long type) +{ + return type & (1UL << KVM_TYPE_CVM_BIT); +} + +#endif + static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) { /* diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 143fbc10ec..c50f292363 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -167,6 +167,10 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn); static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; } #endif
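kvm_arm_cvm_type() above keys off bit 8 of the KVM_CREATE_VM type argument (KVM_TYPE_CVM_BIT), which the mmu.c hunk tolerates alongside the IPA-size field. A minimal sketch of how a VMM would request a confidential VM, assuming a 48-bit IPA:

/* Sketch: bit 8 of the VM type marks the VM as a virtCCA cVM. */
unsigned long vm_type = KVM_VM_TYPE_ARM_IPA_SIZE(48) | (1UL << 8 /* KVM_TYPE_CVM_BIT */);
int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, vm_type);

if (vm_fd < 0)
	perror("KVM_CREATE_VM (cVM)");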
+#ifdef CONFIG_HISI_VIRTCCA_HOST +void arm_pmu_set_phys_irq(bool enable); +#endif + #ifdef CONFIG_KVM void kvm_host_pmu_init(struct arm_pmu *pmu); #else diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index cb63d56d8b..15012e7132 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1509,6 +1509,19 @@ struct kvm_numa_info { #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data) + +#define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */ + +struct kvm_user_data { + __u64 loader_start; + __u64 image_end; + __u64 initrd_start; + __u64 dtb_end; + __u64 ram_size; + struct kvm_numa_info numa_info; +}; + /* enable ucontrol for s390 */ struct kvm_s390_ucas_mapping { __u64 user_addr; diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index a7d5fb473b..5c10d1b7c3 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -1734,4 +1734,20 @@ static int __init rmem_swiotlb_setup(struct reserved_mem *rmem) }
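The new KVM_LOAD_USER_DATA ioctl above passes the guest payload layout to KVM before the cVM is activated. A hedged userspace sketch; the addresses are placeholders and numa_info is left zeroed purely for illustration:

/* Sketch: describe the cVM payload layout to KVM via the new VM ioctl. */
struct kvm_user_data data = {
	.loader_start = 0x40000000UL,	/* placeholder guest PA of the loader  */
	.image_end    = 0x41000000UL,	/* placeholder end of the kernel image */
	.initrd_start = 0x44000000UL,
	.dtb_end      = 0x45000000UL,
	.ram_size     = 0x80000000UL,	/* placeholder: 2 GiB of guest RAM     */
	/* .numa_info intentionally left zeroed in this illustration */
};

if (ioctl(vm_fd, KVM_LOAD_USER_DATA, &data) < 0)
	perror("KVM_LOAD_USER_DATA");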
RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup); + +#ifdef CONFIG_HISI_VIRTCCA_GUEST +void __init swiotlb_cvm_update_mem_attributes(void) +{ + void *vaddr; + unsigned long bytes; + + if (!is_virtcca_cvm_world() || !is_swiotlb_allocated()) + return; + vaddr = phys_to_virt(io_tlb_default_mem.defpool.start); + bytes = PAGE_ALIGN(io_tlb_default_mem.defpool.nslabs << IO_TLB_SHIFT); + set_cvm_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); + memset(vaddr, 0, bytes); + io_tlb_default_mem.for_alloc = true; +} +#endif #endif /* CONFIG_DMA_RESTRICTED_POOL */ diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 62f19b4b96..3ccb5f2bc3 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3550,6 +3550,10 @@ static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + return CVM_MAX_HALT_POLL_NS; +#endif if (kvm->override_halt_poll_ns) { /* * Ensure kvm->max_halt_poll_ns is not read before