From: Jingxian He <hejingxian@huawei.com>
virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAGYKI
--------------------------------
Add host support for Confidential VMs (cVMs):
1. Add a new kvm_type for cVMs.
2. Initialize cVM-related data when userspace creates a VM with the cVM type.
3. Add the cVM hypervisor, named TMM, which runs in S-EL2.
4. Have KVM call the TMI interface to create the cVM stage-2 page tables and run the cVM.
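For reference, the intended userspace flow is sketched below. This is only an illustration, not part of the patch: the cVM machine-type constant (spelled KVM_VM_TYPE_ARM_CVM here) stands in for the uapi value added in include/uapi/linux/kvm.h, which is not shown in this excerpt; the KVM_CAP_ARM_TMM_* sub-command values are defined in asm/kvm_tmi.h in this patch, so a real VMM would carry matching definitions; error handling, KVM_LOAD_USER_DATA, and the CONFIG_CVM_HOST/POPULATE_CVM steps are omitted.

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int run_cvm_vcpu(void)
  {
          int kvm = open("/dev/kvm", O_RDWR);
          /* Create the VM with the cVM kvm_type (placeholder constant). */
          int vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_ARM_CVM);

          /* Create the cVM realm descriptor through KVM_CAP_ARM_TMM. */
          struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_TMM };
          cap.args[0] = KVM_CAP_ARM_TMM_CREATE_RD;
          ioctl(vm, KVM_ENABLE_CAP, &cap);

          /* Create a vCPU with the TEC feature and finalize its TEC. */
          struct kvm_vcpu_init init;
          int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
          int feature = KVM_ARM_VCPU_TEC;

          ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
          init.features[0] |= 1U << KVM_ARM_VCPU_TEC;
          ioctl(vcpu, KVM_ARM_VCPU_INIT, &init);
          ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);

          /* Activate the cVM, then enter its TEC via KVM_RUN. */
          cap.args[0] = KVM_CAP_ARM_TMM_ACTIVATE_CVM;
          ioctl(vm, KVM_ENABLE_CAP, &cap);
          return ioctl(vcpu, KVM_RUN, 0);
  }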
Signed-off-by: Jingxian He <hejingxian@huawei.com>
---
 arch/arm64/configs/openeuler_defconfig |   2 +-
 arch/arm64/include/asm/kvm_emulate.h   |  18 +
 arch/arm64/include/asm/kvm_host.h      |  15 +-
 arch/arm64/include/asm/kvm_tmi.h       | 333 +++++++++
 arch/arm64/include/asm/kvm_tmm.h       |  30 +-
 arch/arm64/include/uapi/asm/kvm.h      |   4 +
 arch/arm64/kvm/Kconfig                 |   6 +-
 arch/arm64/kvm/Makefile                |   3 +
 arch/arm64/kvm/arch_timer.c            |  92 +++
 arch/arm64/kvm/arm.c                   | 112 +++-
 arch/arm64/kvm/guest.c                 |   8 +
 arch/arm64/kvm/mmio.c                  |  17 +-
 arch/arm64/kvm/mmu.c                   |   7 +
 arch/arm64/kvm/psci.c                  |  12 +-
 arch/arm64/kvm/reset.c                 |  13 +
 arch/arm64/kvm/tmi.c                   | 141 ++++
 arch/arm64/kvm/vgic/vgic-v3.c          |  17 +-
 arch/arm64/kvm/vgic/vgic.c             |  55 +-
 arch/arm64/kvm/virtcca_cvm.c           | 889 +++++++++++++++++++++++++
 arch/arm64/kvm/virtcca_cvm_exit.c      | 221 ++++++
 include/kvm/arm_arch_timer.h           |   4 +
 include/linux/kvm_host.h               |  22 +
 include/uapi/linux/kvm.h               |  13 +
 virt/kvm/kvm_main.c                    |   4 +
 24 files changed, 2004 insertions(+), 34 deletions(-)
 create mode 100644 arch/arm64/include/asm/kvm_tmi.h
 create mode 100644 arch/arm64/kvm/tmi.c
 create mode 100644 arch/arm64/kvm/virtcca_cvm.c
 create mode 100644 arch/arm64/kvm/virtcca_cvm_exit.c
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index f4a8a774d..973e518e6 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -758,7 +758,7 @@ CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_KVM_HISI_VIRT=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=y -CONFIG_CVM_HOST=y +CONFIG_HISI_VIRTCCA_HOST=y # CONFIG_NVHE_EL2_DEBUG is not set CONFIG_KVM_ARM_MULTI_LPI_TRANSLATE_CACHE=y CONFIG_ARCH_VCPU_STAT=y diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 589ee66e6..2293caab9 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -639,4 +639,22 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
kvm_write_cptr_el2(val); } + +#ifdef CONFIG_HISI_VIRTCCA_HOST +static inline bool kvm_is_virtcca_cvm(struct kvm *kvm) +{ + if (static_branch_unlikely(&virtcca_cvm_is_available)) + return kvm->arch.is_virtcca_cvm; + return false; +} + +static inline enum virtcca_cvm_state virtcca_cvm_state(struct kvm *kvm) +{ + struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; + + if (!virtcca_cvm) + return 0; + return READ_ONCE(virtcca_cvm->state); +} +#endif #endif /* __ARM64_KVM_EMULATE_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 66c0bb96f..ac8115098 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -27,7 +27,7 @@ #include <asm/fpsimd.h> #include <asm/kvm.h> #include <asm/kvm_asm.h> -#ifdef CONFIG_CVM_HOST +#ifdef CONFIG_HISI_VIRTCCA_HOST #include <asm/kvm_tmm.h> #endif
@@ -292,9 +292,12 @@ struct kvm_arch { u64 tlbi_dvmbm; #endif
-#ifdef CONFIG_CVM_HOST - struct cvm cvm; - bool is_cvm; +#ifdef CONFIG_HISI_VIRTCCA_HOST + union { + struct cvm cvm; + struct virtcca_cvm *virtcca_cvm; + }; + bool is_virtcca_cvm; #endif };
@@ -622,8 +625,8 @@ struct kvm_vcpu_arch { cpumask_var_t pre_sched_cpus; #endif
-#ifdef CONFIG_CVM_HOST - struct cvm_tec tec; +#ifdef CONFIG_HISI_VIRTCCA_HOST + struct virtcca_cvm_tec tec; #endif };
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h new file mode 100644 index 000000000..fc4fe9a71 --- /dev/null +++ b/arch/arm64/include/asm/kvm_tmi.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, The Linux Foundation. All rights reserved. + */ +#ifndef __TMM_TMI_H +#define __TMM_TMI_H +#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <linux/kvm_host.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_pgtable.h> +#include <linux/virtio_ring.h> +#include <asm/sysreg.h> + +#define GRANULE_SIZE 4096 + +#define NO_NUMA 0 /* numa bitmap */ + +#define TMM_TTT_LEVEL_2 2 +#define TMM_TTT_LEVEL_3 3 + +/* TMI error codes. */ +#define TMI_SUCCESS 0 +#define TMI_ERROR_INPUT 1 +#define TMI_ERROR_MEMORY 2 +#define TMI_ERROR_ALIAS 3 +#define TMI_ERROR_IN_USE 4 +#define TMI_ERROR_CVM_STATE 5 +#define TMI_ERROR_OWNER 6 +#define TMI_ERROR_TEC 7 +#define TMI_ERROR_TTT_WALK 8 +#define TMI_ERROR_TTT_ENTRY 9 +#define TMI_ERROR_NOT_SUPPORTED 10 +#define TMI_ERROR_INTERNAL 11 +#define TMI_ERROR_CVM_POWEROFF 12 +#define TMI_ERROR_TTT_CREATED 13 + +#define TMI_RETURN_STATUS(ret) ((ret) & 0xFF) +#define TMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF) + +#define TMI_FEATURE_REGISTER_0_S2SZ GENMASK(7, 0) +#define TMI_FEATURE_REGISTER_0_LPA2 BIT(8) +#define TMI_FEATURE_REGISTER_0_SVE_EN BIT(9) +#define TMI_FEATURE_REGISTER_0_SVE_VL GENMASK(13, 10) +#define TMI_FEATURE_REGISTER_0_NUM_BPS GENMASK(17, 14) +#define TMI_FEATURE_REGISTER_0_NUM_WPS GENMASK(21, 18) +#define TMI_FEATURE_REGISTER_0_PMU_EN BIT(22) +#define TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS GENMASK(27, 23) +#define TMI_FEATURE_REGISTER_0_HASH_SHA_256 BIT(28) +#define TMI_FEATURE_REGISTER_0_HASH_SHA_512 BIT(29) + +#define TMI_CVM_PARAM_FLAG_LPA2 BIT(0) +#define TMI_CVM_PARAM_FLAG_SVE BIT(1) +#define TMI_CVM_PARAM_FLAG_PMU BIT(2) + +#define TMI_NOT_RUNNABLE 0 +#define TMI_RUNNABLE 1 + +/* + * The number of GPRs (starting from X0) that are + * configured by the host when a TEC is created. + */ +#define TEC_CREATE_NR_GPRS (8U) + +struct tmi_tec_params { + uint64_t gprs[TEC_CREATE_NR_GPRS]; + uint64_t pc; + uint64_t flags; + uint64_t ram_size; +}; + +#define TEC_ENTRY_FLAG_EMUL_MMIO (1UL << 0U) +#define TEC_ENTRY_FLAG_INJECT_SEA (1UL << 1U) +#define TEC_ENTRY_FLAG_TRAP_WFI (1UL << 2U) +#define TEC_ENTRY_FLAG_TRAP_WFE (1UL << 3U) + +#define TMI_EXIT_SYNC 0 +#define TMI_EXIT_IRQ 1 +#define TMI_EXIT_FIQ 2 +#define TMI_EXIT_PSCI 3 +#define TMI_EXIT_HOST_CALL 5 +#define TMI_EXIT_SERROR 6 + +/* + * The number of GPRs (starting from X0) per voluntary exit context. + * Per SMCCC. + */ + #define TEC_EXIT_NR_GPRS (31U) + +/* Maximum number of Interrupt Controller List Registers. 
*/ +#define TEC_GIC_NUM_LRS (16U) + +struct tmi_tec_entry { + uint64_t flags; + uint64_t gprs[TEC_EXIT_NR_GPRS]; + uint64_t gicv3_lrs[TEC_GIC_NUM_LRS]; + uint64_t gicv3_hcr; +}; + +struct tmi_tec_exit { + uint64_t exit_reason; + uint64_t esr; + uint64_t far; + uint64_t hpfar; + uint64_t gprs[TEC_EXIT_NR_GPRS]; + uint64_t gicv3_hcr; + uint64_t gicv3_lrs[TEC_GIC_NUM_LRS]; + uint64_t gicv3_misr; + uint64_t gicv3_vmcr; + uint64_t cntv_ctl; + uint64_t cntv_cval; + uint64_t cntp_ctl; + uint64_t cntp_cval; + uint64_t imm; + uint64_t pmu_ovf_status; +}; + +struct tmi_tec_run { + struct tmi_tec_entry tec_entry; + struct tmi_tec_exit tec_exit; +}; + +#define TMI_FNUM_MIN_VALUE U(0x150) +#define TMI_FNUM_MAX_VALUE U(0x18F) + +/****************************************************************************** + * Bit definitions inside the function id as per the SMC calling convention + ******************************************************************************/ +#define FUNCID_TYPE_SHIFT 31 +#define FUNCID_CC_SHIFT 30 +#define FUNCID_OEN_SHIFT 24 +#define FUNCID_NUM_SHIFT 0 + +#define FUNCID_TYPE_MASK 0x1 +#define FUNCID_CC_MASK 0x1 +#define FUNCID_OEN_MASK 0x3f +#define FUNCID_NUM_MASK 0xffff + +#define FUNCID_TYPE_WIDTH 1 +#define FUNCID_CC_WIDTH 1 +#define FUNCID_OEN_WIDTH 6 +#define FUNCID_NUM_WIDTH 16 + +#define SMC_64 1 +#define SMC_32 0 +#define SMC_TYPE_FAST 1 +#define SMC_TYPE_STD 0 + +/***************************************************************************** + * Owning entity number definitions inside the function id as per the SMC + * calling convention + *****************************************************************************/ +#define OEN_ARM_START 0 +#define OEN_ARM_END 0 +#define OEN_CPU_START 1 +#define OEN_CPU_END 1 +#define OEN_SIP_START 2 +#define OEN_SIP_END 2 +#define OEN_OEM_START 3 +#define OEN_OEM_END 3 +#define OEN_STD_START 4 /* Standard Calls */ +#define OEN_STD_END 4 +#define OEN_TAP_START 48 /* Trusted Applications */ +#define OEN_TAP_END 49 +#define OEN_TOS_START 50 /* Trusted OS */ +#define OEN_TOS_END 63 +#define OEN_LIMIT 64 + +/* Get TMI fastcall std FID from function number */ +#define TMI_FID(smc_cc, func_num) \ + ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \ + ((smc_cc) << FUNCID_CC_SHIFT) | \ + (OEN_STD_START << FUNCID_OEN_SHIFT) | \ + ((func_num) << FUNCID_NUM_SHIFT)) + +#define U(_x) (_x##U) + +#define TMI_NO_MEASURE_CONTENT U(0) +#define TMI_MEASURE_CONTENT U(1) + +#define CVM_IPA_MAX_VAL (1UL << 48) + +/* + * SMC_TMM_INIT_COMPLETE is the only function in the TMI that originates from + * the CVM world and is handled by the SPMD. The remaining functions are + * always invoked by the Normal world, forward by SPMD and handled by the + * TMM. 
+ */ +#define TMI_FNUM_VERSION_REQ U(0x260) +#define TMI_FNUM_MEM_INFO_SHOW U(0x261) +#define TMI_FNUM_DATA_CREATE U(0x262) +#define TMI_FNUM_DATA_DESTROY U(0x263) +#define TMI_FNUM_CVM_ACTIVATE U(0x264) +#define TMI_FNUM_CVM_CREATE U(0x265) +#define TMI_FNUM_CVM_DESTROY U(0x266) +#define TMI_FNUM_TEC_CREATE U(0x267) +#define TMI_FNUM_TEC_DESTROY U(0x268) +#define TMI_FNUM_TEC_ENTER U(0x269) +#define TMI_FNUM_TTT_CREATE U(0x26A) +#define TMI_FNUM_PSCI_COMPLETE U(0x26B) +#define TMI_FNUM_FEATURES U(0x26C) +#define TMI_FNUM_TTT_MAP_RANGE U(0x26D) +#define TMI_FNUM_TTT_UNMAP_RANGE U(0x26E) +#define TMI_FNUM_INF_TEST U(0x270) + +/* TMI SMC64 PIDs handled by the SPMD */ +#define TMI_TMM_VERSION_REQ TMI_FID(SMC_64, TMI_FNUM_VERSION_REQ) +#define TMI_TMM_DATA_CREATE TMI_FID(SMC_64, TMI_FNUM_DATA_CREATE) +#define TMI_TMM_DATA_DESTROY TMI_FID(SMC_64, TMI_FNUM_DATA_DESTROY) +#define TMI_TMM_CVM_ACTIVATE TMI_FID(SMC_64, TMI_FNUM_CVM_ACTIVATE) +#define TMI_TMM_CVM_CREATE TMI_FID(SMC_64, TMI_FNUM_CVM_CREATE) +#define TMI_TMM_CVM_DESTROY TMI_FID(SMC_64, TMI_FNUM_CVM_DESTROY) +#define TMI_TMM_TEC_CREATE TMI_FID(SMC_64, TMI_FNUM_TEC_CREATE) +#define TMI_TMM_TEC_DESTROY TMI_FID(SMC_64, TMI_FNUM_TEC_DESTROY) +#define TMI_TMM_TEC_ENTER TMI_FID(SMC_64, TMI_FNUM_TEC_ENTER) +#define TMI_TMM_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_TTT_CREATE) +#define TMI_TMM_PSCI_COMPLETE TMI_FID(SMC_64, TMI_FNUM_PSCI_COMPLETE) +#define TMI_TMM_FEATURES TMI_FID(SMC_64, TMI_FNUM_FEATURES) +#define TMI_TMM_MEM_INFO_SHOW TMI_FID(SMC_64, TMI_FNUM_MEM_INFO_SHOW) +#define TMI_TMM_TTT_MAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_RANGE) +#define TMI_TMM_TTT_UNMAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_RANGE) +#define TMI_TMM_INF_TEST TMI_FID(SMC_64, TMI_FNUM_INF_TEST) + +#define TMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16) +#define TMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF) + +#define TMI_ABI_VERSION_MAJOR U(0x1) + +/* KVM_CAP_ARM_TMM on VM fd */ +#define KVM_CAP_ARM_TMM_CONFIG_CVM_HOST 0 +#define KVM_CAP_ARM_TMM_CREATE_RD 1 +#define KVM_CAP_ARM_TMM_POPULATE_CVM 2 +#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 3 + +#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0 +#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1 + +#define KVM_CAP_ARM_TMM_RPV_SIZE 64 + +/* List of configuration items accepted for KVM_CAP_ARM_TMM_CONFIG_CVM_HOST */ +#define KVM_CAP_ARM_TMM_CFG_RPV 0 +#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1 +#define KVM_CAP_ARM_TMM_CFG_SVE 2 +#define KVM_CAP_ARM_TMM_CFG_DBG 3 +#define KVM_CAP_ARM_TMM_CFG_PMU 4 + +DECLARE_STATIC_KEY_FALSE(virtcca_cvm_is_available); +DECLARE_STATIC_KEY_FALSE(virtcca_cvm_is_enable); + +struct kvm_cap_arm_tmm_config_item { + __u32 cfg; + union { + /* cfg == KVM_CAP_ARM_TMM_CFG_RPV */ + struct { + __u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE]; + }; + + /* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */ + struct { + __u32 hash_algo; + }; + + /* cfg == KVM_CAP_ARM_TMM_CFG_SVE */ + struct { + __u32 sve_vq; + }; + + /* cfg == KVM_CAP_ARM_TMM_CFG_DBG */ + struct { + __u32 num_brps; + __u32 num_wrps; + }; + + /* cfg == KVM_CAP_ARM_TMM_CFG_PMU */ + struct { + __u32 num_pmu_cntrs; + }; + /* Fix the size of the union */ + __u8 reserved[256]; + }; +}; + +#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0) +struct kvm_cap_arm_tmm_populate_region_args { + __u64 populate_ipa_base1; + __u64 populate_ipa_size1; + __u64 populate_ipa_base2; + __u64 populate_ipa_size2; + __u32 flags; + __u32 reserved[3]; +}; + +static inline bool tmm_is_addr_ttt_level_aligned(uint64_t addr, int level) +{ + uint64_t mask = (1 << (12 + 9 * (3 - 
level))) - 1; + + return (addr & mask) == 0; +} + +#define ID_AA64PFR0_SEL2_MASK ULL(0xf) +#define ID_AA64PFR0_SEL2_SHIFT 36 + +static inline bool is_armv8_4_sel2_present(void) +{ + return ((read_sysreg(id_aa64pfr0_el1) >> ID_AA64PFR0_SEL2_SHIFT) & + ID_AA64PFR0_SEL2_MASK) == 1UL; +} + +u64 tmi_version(void); +u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level); +u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level); +u64 tmi_cvm_activate(u64 rd); +u64 tmi_cvm_create(u64 params_ptr, u64 numa_set); +u64 tmi_cvm_destroy(u64 rd); +u64 tmi_tec_create(u64 numa_set, u64 rd, u64 mpidr, u64 params_ptr); +u64 tmi_tec_destroy(u64 tec); +u64 tmi_tec_enter(u64 tec, u64 run_ptr); +u64 tmi_ttt_create(u64 numa_set, u64 rd, u64 map_addr, u64 level); +u64 tmi_psci_complete(u64 calling_tec, u64 target_tec); +u64 tmi_features(u64 index); +u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node); +u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id); +u64 tmi_mem_info_show(u64 mem_info_addr); + +void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu); +int kvm_load_user_data(struct kvm *kvm, unsigned long arg); +unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu, + unsigned long target_affinity, unsigned long lowest_affinity_level); +int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu, + bool serror_pending, bool ext_dabt_pending); +int kvm_init_cvm_vm(struct kvm *kvm); +int kvm_enable_virtcca_cvm(struct kvm *kvm); +#endif +#endif diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h index f70d73be0..96532ca8f 100644 --- a/arch/arm64/include/asm/kvm_tmm.h +++ b/arch/arm64/include/asm/kvm_tmm.h @@ -7,8 +7,8 @@
#include <uapi/linux/kvm.h>
-enum cvm_state { - CVM_STATE_NONE, +enum virtcca_cvm_state { + CVM_STATE_NONE = 1, CVM_STATE_NEW, CVM_STATE_ACTIVE, CVM_STATE_DYING @@ -36,7 +36,7 @@ struct tmi_cvm_params { };
struct cvm { - enum cvm_state state; + enum virtcca_cvm_state state; u32 cvm_vmid; u64 rd; u64 loader_start; @@ -48,10 +48,23 @@ struct cvm { bool is_cvm; };
+struct virtcca_cvm { + enum virtcca_cvm_state state; + u32 cvm_vmid; + u64 rd; + u64 loader_start; + u64 image_end; + u64 initrd_start; + u64 dtb_end; + u64 ram_size; + struct kvm_numa_info numa_info; + struct tmi_cvm_params *params; +}; + /* * struct cvm_tec - Additional per VCPU data for a CVM */ -struct cvm_tec { +struct virtcca_cvm_tec { u64 tec; bool tec_created; void *tec_run; @@ -59,22 +72,19 @@ struct cvm_tec {
int kvm_init_tmm(void); int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); -int kvm_init_cvm_vm(struct kvm *kvm); void kvm_destroy_cvm(struct kvm *kvm); -int kvm_create_tec(struct kvm_vcpu *vcpu); +int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu); void kvm_destroy_tec(struct kvm_vcpu *vcpu); int kvm_tec_enter(struct kvm_vcpu *vcpu); int handle_cvm_exit(struct kvm_vcpu *vcpu, int rec_run_status); int kvm_arm_create_cvm(struct kvm *kvm); void kvm_free_rd(struct kvm *kvm); -int cvm_create_rd(struct kvm *kvm); int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target); -int kvm_arch_tec_init(struct kvm_vcpu *vcpu);
void kvm_cvm_unmap_destroy_range(struct kvm *kvm);
-#define CVM_TTT_BLOCK_LEVEL 2 -#define CVM_TTT_MAX_LEVEL 3 +#define CVM_TTT_BLOCK_LEVEL 2 +#define CVM_TTT_MAX_LEVEL 3
#define CVM_PAGE_SHIFT 12 #define CVM_PAGE_SIZE BIT(CVM_PAGE_SHIFT) diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index f7ddd73a8..97941e582 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -110,6 +110,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ #define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */ +#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */
struct kvm_vcpu_init { __u32 target; @@ -415,6 +416,9 @@ enum { #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 #define KVM_DEV_ARM_ITS_CTRL_RESET 4
+#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256 0 +#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512 1 + /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 #define KVM_ARM_VCPU_PMU_V3_IRQ 0 diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 1fa6fba60..52edbd7f6 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -49,11 +49,11 @@ menuconfig KVM
If unsure, say N.
-config CVM_HOST - bool "Enable cvm host feature" +config HISI_VIRTCCA_HOST + bool "Enable virtcca cvm host feature" depends on KVM help - Support CVM based on S-EL2 + Support VIRTCCA CVM based on S-EL2
If unsure, say N.
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 952eee572..eadf41417 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -24,6 +24,9 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o pvsched.o \
kvm-$(CONFIG_VIRT_PLAT_DEV) += vgic/shadow_dev.o kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o +kvm-$(CONFIG_HISI_VIRTCCA_HOST) += tmi.o +kvm-$(CONFIG_HISI_VIRTCCA_HOST) += virtcca_cvm.o +kvm-$(CONFIG_HISI_VIRTCCA_HOST) += virtcca_cvm_exit.o obj-$(CONFIG_KVM_HISI_VIRT) += hisilicon/
always-y := hyp_constants.h hyp-constants.s diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 43957fce5..205361d1b 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -18,6 +18,10 @@ #include <asm/kvm_hyp.h> #include <asm/kvm_nested.h>
+#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <asm/kvm_tmi.h> +#endif + #include <kvm/arm_vgic.h> #include <kvm/arm_arch_timer.h>
@@ -175,6 +179,71 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval) } }
+#ifdef CONFIG_HISI_VIRTCCA_HOST +static bool cvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx) +{ + return timer_ctx && + ((timer_get_ctl(timer_ctx) & + (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE); +} + +void kvm_cvm_timers_update(struct kvm_vcpu *vcpu) +{ + int i; + u64 cval, now; + bool status, level; + struct arch_timer_context *timer; + struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu; + + for (i = 0; i < NR_KVM_TIMERS; i++) { + timer = &arch_timer->timers[i]; + + if (!timer->loaded) { + if (!cvm_timer_irq_can_fire(timer)) + continue; + cval = timer_get_cval(timer); + now = kvm_phys_timer_read() - timer_get_offset(timer); + level = (cval <= now); + kvm_timer_update_irq(vcpu, level, timer); + } else { + status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT; + level = cvm_timer_irq_can_fire(timer) && status; + if (level != timer->irq.level) + kvm_timer_update_irq(vcpu, level, timer); + } + } +} + +static void set_cvm_timers_loaded(struct kvm_vcpu *vcpu, bool loaded) +{ + int i; + struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu; + + for (i = 0; i < NR_KVM_TIMERS; i++) { + struct arch_timer_context *timer = &arch_timer->timers[i]; + + timer->loaded = loaded; + } +} + +static void kvm_timer_blocking(struct kvm_vcpu *vcpu); +static void kvm_timer_unblocking(struct kvm_vcpu *vcpu); + +static inline void cvm_vcpu_load_timer_callback(struct kvm_vcpu *vcpu) +{ + kvm_cvm_timers_update(vcpu); + kvm_timer_unblocking(vcpu); + set_cvm_timers_loaded(vcpu, true); +} + +static inline void cvm_vcpu_put_timer_callback(struct kvm_vcpu *vcpu) +{ + set_cvm_timers_loaded(vcpu, false); + if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu))) + kvm_timer_blocking(vcpu); +} +#endif + static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset) { if (!ctxt->offset.vm_offset) { @@ -883,6 +952,13 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = vcpu_timer(vcpu); struct timer_map map;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + cvm_vcpu_load_timer_callback(vcpu); + return; + } +#endif + if (unlikely(!timer->enabled)) return;
@@ -981,6 +1057,13 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = vcpu_timer(vcpu); struct timer_map map;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + cvm_vcpu_put_timer_callback(vcpu); + return; + } +#endif + if (unlikely(!timer->enabled)) return;
@@ -1766,6 +1849,15 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) return -EINVAL; }
+#ifdef CONFIG_HISI_VIRTCCA_HOST + /* + * We don't use mapped IRQs for CVM because the TMI doesn't allow + * us to set the LR.HW bit in the VGIC. + */ + if (vcpu_is_tec(vcpu)) + return 0; +#endif + get_timer_map(vcpu, &map);
#ifdef CONFIG_VIRT_VTIMER_IRQ_BYPASS diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 333c65ced..cff0affca 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -40,6 +40,9 @@ #include <asm/kvm_pkvm.h> #include <asm/kvm_emulate.h> #include <asm/sections.h> +#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <asm/kvm_tmi.h> +#endif
#include <kvm/arm_hypercalls.h> #include <kvm/arm_pmu.h> @@ -132,6 +135,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, } mutex_unlock(&kvm->slots_lock); break; +#ifdef CONFIG_HISI_VIRTCCA_HOST + case KVM_CAP_ARM_TMM: + if (static_branch_unlikely(&virtcca_cvm_is_available)) + r = kvm_cvm_enable_cap(kvm, cap); + break; +#endif default: r = -EINVAL; break; @@ -153,6 +162,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (kvm_arm_cvm_type(type)) { + ret = kvm_enable_virtcca_cvm(kvm); + if (ret) + return ret; + } +#endif + ret = kvm_sched_affinity_vm_init(kvm); if (ret) return ret; @@ -199,8 +216,20 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (kvm_arm_cvm_type(type)) { + ret = kvm_init_cvm_vm(kvm); + if (ret) + goto out_free_stage2_pgd; + } +#endif + return 0;
+#ifdef CONFIG_HISI_VIRTCCA_HOST +out_free_stage2_pgd: + kvm_free_stage2_pgd(&kvm->arch.mmu); +#endif err_free_cpumask: free_cpumask_var(kvm->arch.supported_cpus); err_unshare_kvm: @@ -235,6 +264,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_unshare_hyp(kvm, kvm + 1);
kvm_arm_teardown_hypercalls(kvm); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (kvm_is_virtcca_cvm(kvm)) + kvm_destroy_cvm(kvm); +#endif }
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) @@ -306,7 +339,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = system_supports_mte(); break; case KVM_CAP_STEAL_TIME: - r = kvm_arm_pvtime_supported(); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (kvm && kvm_is_virtcca_cvm(kvm)) + r = 0; + else +#endif + r = kvm_arm_pvtime_supported(); break; case KVM_CAP_ARM_EL1_32BIT: r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1); @@ -346,6 +384,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ARM_VIRT_MSI_BYPASS: r = sdev_enable; break; +#endif +#ifdef CONFIG_HISI_VIRTCCA_HOST + case KVM_CAP_ARM_TMM: + if (!is_armv8_4_sel2_present()) { + r = -ENXIO; + break; + } + r = static_key_enabled(&virtcca_cvm_is_available); + break; #endif default: r = 0; @@ -499,8 +546,23 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + if (single_task_running()) + vcpu_clear_wfx_traps(vcpu); + else + vcpu_set_wfx_traps(vcpu); + } +#endif kvm_vgic_load(vcpu); kvm_timer_vcpu_load(vcpu); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) + kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); + return; + } +#endif if (has_vhe()) kvm_vcpu_load_sysregs_vhe(vcpu); kvm_arch_vcpu_load_fp(vcpu); @@ -531,6 +593,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + kvm_cvm_vcpu_put(vcpu); + return; + } +#endif kvm_arch_vcpu_put_debug_state_flags(vcpu); kvm_arch_vcpu_put_fp(vcpu); if (has_vhe()) @@ -705,6 +773,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) * Tell the rest of the code that there are userspace irqchip * VMs in the wild. */ +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (!kvm_is_virtcca_cvm(kvm)) +#endif static_branch_inc(&userspace_irqchip_in_use); }
@@ -1076,7 +1147,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) trace_kvm_entry(*vcpu_pc(vcpu)); guest_timing_enter_irqoff();
- ret = kvm_arm_vcpu_enter_exit(vcpu); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + ret = kvm_tec_enter(vcpu); + else +#endif + ret = kvm_arm_vcpu_enter_exit(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->stat.exits++; @@ -1130,11 +1206,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
local_irq_enable();
- trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (!vcpu_is_tec(vcpu)) { +#endif + trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
- /* Exit types that need handling before we can be preempted */ - handle_exit_early(vcpu, ret); + /* Exit types that need handling before we can be preempted */ + handle_exit_early(vcpu, ret);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + } +#endif preempt_enable();
/* @@ -1156,7 +1238,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ret = ARM_EXCEPTION_IL; }
- ret = handle_exit(vcpu, ret); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + ret = handle_cvm_exit(vcpu, ret); + else +#endif + ret = handle_exit(vcpu, ret); #ifdef CONFIG_ARCH_VCPU_STAT update_vcpu_stat_time(&vcpu->stat); #endif @@ -1669,6 +1756,11 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) struct kvm_device_attr attr;
switch (ioctl) { +#ifdef CONFIG_HISI_VIRTCCA_HOST + case KVM_LOAD_USER_DATA: { + return kvm_load_user_data(kvm, arg); + } +#endif case KVM_CREATE_IRQCHIP: { int ret; if (!vgic_present) @@ -2555,6 +2647,14 @@ static __init int kvm_arm_init(void)
in_hyp_mode = is_kernel_in_hyp_mode();
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (static_branch_unlikely(&virtcca_cvm_is_enable) && in_hyp_mode) { + err = kvm_init_tmm(); + if (err) + return err; + } +#endif + if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || cpus_have_final_cap(ARM64_WORKAROUND_1508412)) kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 3ebbb81b1..ae263604c 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -27,6 +27,10 @@ #include <asm/kvm_nested.h> #include <asm/sigcontext.h>
+#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <asm/kvm_tmi.h> +#endif + #include "trace.h"
#ifdef CONFIG_ARCH_VCPU_STAT @@ -874,6 +878,10 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, bool has_esr = events->exception.serror_has_esr; bool ext_dabt_pending = events->exception.ext_dabt_pending;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + return kvm_cvm_vcpu_set_events(vcpu, serror_pending, ext_dabt_pending); +#endif if (serror_pending && has_esr) { if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) return -EINVAL; diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c index 3dd38a151..f2930050c 100644 --- a/arch/arm64/kvm/mmio.c +++ b/arch/arm64/kvm/mmio.c @@ -8,6 +8,10 @@ #include <asm/kvm_emulate.h> #include <trace/events/kvm.h>
+#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <asm/kvm_tmi.h> +#endif + #include "trace.h"
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data) @@ -109,6 +113,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu) &data); data = vcpu_data_host_to_guest(vcpu, data, len); vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)-> + tec_entry.gprs[0] = data; + } +#endif }
/* @@ -178,7 +188,12 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) run->mmio.phys_addr = fault_ipa; run->mmio.len = len; vcpu->mmio_needed = 1; - +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags |= + TEC_ENTRY_FLAG_EMUL_MMIO; + } +#endif if (!ret) { /* We handled the access successfully in the kernel. */ if (!is_write) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 121a3d902..15c68b3c8 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1013,6 +1013,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
write_lock(&kvm->mmu_lock); pgt = mmu->pgt; + if (pgt) { mmu->pgd_phys = 0; mmu->pgt = NULL; @@ -1414,6 +1415,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level); write_fault = kvm_is_write_fault(vcpu); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + write_fault = true; + prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W; + } +#endif exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); VM_BUG_ON(write_fault && exec_fault); vcpu->stat.mabt_exit_stat++; diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 1f69b6673..c611048fc 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -16,6 +16,9 @@ #include <kvm/arm_psci.h> #include <kvm/arm_hypercalls.h>
+#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <asm/kvm_tmi.h> +#endif /* * This is an implementation of the Power State Coordination Interface * as described in ARM document number ARM DEN 0022A. @@ -79,6 +82,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) return PSCI_RET_INVALID_PARAMS;
spin_lock(&vcpu->arch.mp_state_lock); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + cvm_psci_complete(source_vcpu, vcpu); +#endif if (!kvm_arm_vcpu_stopped(vcpu)) { if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) ret = PSCI_RET_ALREADY_ON; @@ -141,7 +148,10 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
/* Ignore other bits of target affinity */ target_affinity &= target_affinity_mask; - +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + return cvm_psci_vcpu_affinity_info(vcpu, target_affinity, lowest_affinity_level); +#endif /* * If one or more VCPU matching target affinity are running * then ON else OFF diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 7a65a35ee..cfb01c491 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -30,6 +30,9 @@ #include <asm/kvm_nested.h> #include <asm/virt.h>
+#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <asm/kvm_tmi.h> +#endif /* Maximum phys_shift supported for any VM on this host */ static u32 __ro_after_init kvm_ipa_limit;
@@ -139,6 +142,12 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) return -EPERM;
return kvm_vcpu_finalize_sve(vcpu); +#ifdef CONFIG_HISI_VIRTCCA_HOST + case KVM_ARM_VCPU_TEC: + if (!kvm_is_virtcca_cvm(vcpu->kvm)) + return -EINVAL; + return kvm_finalize_vcpu_tec(vcpu); +#endif }
return -EINVAL; @@ -162,6 +171,10 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu)); kfree(sve_state); kfree(vcpu->arch.ccsidr); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + kvm_destroy_tec(vcpu); +#endif }
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c new file mode 100644 index 000000000..c1f22139d --- /dev/null +++ b/arch/arm64/kvm/tmi.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, The Linux Foundation. All rights reserved. + */ +#include <linux/arm-smccc.h> +#include <asm/kvm_tmi.h> +#include <asm/memory.h> + +u64 tmi_version(void) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_VERSION_REQ, &res); + return res.a1; +} + +u64 tmi_data_create(u64 numa_set, u64 rd, u64 map_addr, u64 src, u64 level) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_DATA_CREATE, numa_set, rd, map_addr, src, level, &res); + return res.a1; +} + +u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_DATA_DESTROY, rd, map_addr, level, &res); + return res.a1; +} + +u64 tmi_cvm_activate(u64 rd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_CVM_ACTIVATE, rd, &res); + return res.a1; +} + +u64 tmi_cvm_create(u64 params_ptr, u64 numa_set) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, params_ptr, numa_set, &res); + return res.a1; +} + +u64 tmi_cvm_destroy(u64 rd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_CVM_DESTROY, rd, &res); + return res.a1; +} + +u64 tmi_tec_create(u64 numa_set, u64 rd, u64 mpidr, u64 params_ptr) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_TEC_CREATE, numa_set, rd, mpidr, params_ptr, &res); + return res.a1; +} + +u64 tmi_tec_destroy(u64 tec) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_TEC_DESTROY, tec, &res); + return res.a1; +} + +u64 tmi_tec_enter(u64 tec, u64 run_ptr) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_TEC_ENTER, tec, run_ptr, &res); + return res.a1; +} + +u64 tmi_ttt_create(u64 numa_set, u64 rd, u64 map_addr, u64 level) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_TTT_CREATE, numa_set, rd, map_addr, level, &res); + return res.a1; +} + +u64 tmi_psci_complete(u64 calling_tec, u64 target_tec) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_PSCI_COMPLETE, calling_tec, target_tec, &res); + return res.a1; +} + +u64 tmi_features(u64 index) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_FEATURES, index, &res); + return res.a1; +} + +u64 tmi_mem_info_show(u64 mem_info_addr) +{ + struct arm_smccc_res res; + u64 pa_addr = __pa(mem_info_addr); + + arm_smccc_1_1_smc(TMI_TMM_MEM_INFO_SHOW, pa_addr, &res); + return res.a1; +} +EXPORT_SYMBOL_GPL(tmi_mem_info_show); + +u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_TTT_MAP_RANGE, rd, map_addr, size, cur_node, target_node, &res); + return res.a1; +} + +u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_RANGE, rd, map_addr, size, node_id, &res); + return res.a1; +} + +u64 tmi_tmm_inf_test(u64 x1, u64 x2, u64 x3, u64 x4, u64 x5) +{ + struct arm_smccc_res res; + u64 vttbr_el2_pa = __pa(x2); + u64 cvm_params_pa = __pa(x3); + u64 tec_params_pa = __pa(x4); + + arm_smccc_1_1_smc(TMI_TMM_INF_TEST, x1, vttbr_el2_pa, cvm_params_pa, tec_params_pa, x5, &res); + return res.a1; +} +EXPORT_SYMBOL_GPL(tmi_tmm_inf_test); \ No newline at end of file diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 
69ca111e3..f9afb1dd3 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -11,6 +11,10 @@ #include <asm/kvm_mmu.h> #include <asm/kvm_asm.h>
+#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <asm/kvm_tmi.h> +#endif + #include "vgic.h"
static bool group0_trap; @@ -680,7 +684,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info) (unsigned long long)info->vcpu.start); } else if (kvm_get_mode() != KVM_MODE_PROTECTED) { kvm_vgic_global_state.vcpu_base = info->vcpu.start; - kvm_vgic_global_state.can_emulate_gicv2 = true; +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (!static_branch_unlikely(&virtcca_cvm_is_available)) +#endif + kvm_vgic_global_state.can_emulate_gicv2 = true; ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2); if (ret) { kvm_err("Cannot register GICv2 KVM device.\n"); @@ -760,7 +767,13 @@ void vgic_v3_load(struct kvm_vcpu *vcpu) void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; - +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) { + cpu_if->vgic_vmcr = + ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_exit.gicv3_vmcr; + return; + } +#endif if (likely(cpu_if->vgic_sre)) cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr); } diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index 2459b0ade..03ba0e01a 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -12,6 +12,10 @@
#include <asm/kvm_hyp.h>
+#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <asm/kvm_tmi.h> +#endif + #include "vgic.h"
#define CREATE_TRACE_POINTS @@ -897,12 +901,44 @@ static inline bool can_access_vgic_from_kernel(void) return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe(); }
+#ifdef CONFIG_HISI_VIRTCCA_HOST +static inline void vgic_tmm_save_state(struct kvm_vcpu *vcpu) +{ + int i; + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run; + + for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) { + cpu_if->vgic_lr[i] = tec_run->tec_exit.gicv3_lrs[i]; + tec_run->tec_entry.gicv3_lrs[i] = 0; + } +} + +static inline void vgic_tmm_restore_state(struct kvm_vcpu *vcpu) +{ + int i; + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run; + + for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) { + tec_run->tec_entry.gicv3_lrs[i] = cpu_if->vgic_lr[i]; + tec_run->tec_exit.gicv3_lrs[i] = cpu_if->vgic_lr[i]; + } +} +#endif + static inline void vgic_save_state(struct kvm_vcpu *vcpu) { if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_save_state(vcpu); else - __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + vgic_tmm_save_state(vcpu); + else +#endif + __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); + }
/* Sync back the hardware VGIC state into our emulation after a guest's run. */ @@ -932,7 +968,12 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu) if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_restore_state(vcpu); else - __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + vgic_tmm_restore_state(vcpu); + else +#endif + __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); }
/* Flush our emulation state into the GIC hardware before entering the guest. */ @@ -973,7 +1014,10 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu) { if (unlikely(!vgic_initialized(vcpu->kvm))) return; - +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + return; +#endif if (kvm_vgic_global_state.type == VGIC_V2) vgic_v2_load(vcpu); else @@ -984,7 +1028,10 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu) { if (unlikely(!vgic_initialized(vcpu->kvm))) return; - +#ifdef CONFIG_HISI_VIRTCCA_HOST + if (vcpu_is_tec(vcpu)) + return; +#endif if (kvm_vgic_global_state.type == VGIC_V2) vgic_v2_put(vcpu); else diff --git a/arch/arm64/kvm/virtcca_cvm.c b/arch/arm64/kvm/virtcca_cvm.c new file mode 100644 index 000000000..367bbf4fa --- /dev/null +++ b/arch/arm64/kvm/virtcca_cvm.c @@ -0,0 +1,889 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, The Linux Foundation. All rights reserved. + */ +#include <linux/kvm_host.h> +#include <linux/kvm.h> +#include <asm/kvm_tmi.h> +#include <asm/kvm_pgtable.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_mmu.h> +#include <asm/stage2_pgtable.h> +#include <linux/arm-smccc.h> +#include <kvm/arm_hypercalls.h> +#include <kvm/arm_psci.h> + +/* Protects access to cvm_vmid_bitmap */ +static DEFINE_SPINLOCK(cvm_vmid_lock); +static unsigned long *cvm_vmid_bitmap; +DEFINE_STATIC_KEY_FALSE(virtcca_cvm_is_available); +DEFINE_STATIC_KEY_FALSE(virtcca_cvm_is_enable); +#define SIMD_PAGE_SIZE 0x3000 + +int kvm_enable_virtcca_cvm(struct kvm *kvm) +{ + if (!static_key_enabled(&virtcca_cvm_is_available)) + return -EFAULT; + + kvm->arch.is_virtcca_cvm = true; + return 0; +} + +static int __init setup_virtcca_cvm_host(char *str) +{ + int ret; + unsigned int val; + + if (!str) + return 0; + + ret = kstrtouint(str, 10, &val); + if (ret) { + pr_warn("Unable to parse cvm_guest.\n"); + } else { + if (val) + static_branch_enable(&virtcca_cvm_is_enable); + } + return ret; +} +early_param("virtcca_cvm_host", setup_virtcca_cvm_host); + +static int cvm_vmid_init(void) +{ + unsigned int vmid_count = 1 << kvm_get_vmid_bits(); + + cvm_vmid_bitmap = bitmap_zalloc(vmid_count, GFP_KERNEL); + if (!cvm_vmid_bitmap) { + kvm_err("%s: Couldn't allocate cvm vmid bitmap\n", __func__); + return -ENOMEM; + } + return 0; +} + +static unsigned long tmm_feat_reg0; + +static bool tmm_supports(unsigned long feature) +{ + return !!u64_get_bits(tmm_feat_reg0, feature); +} + +bool kvm_cvm_supports_sve(void) +{ + return tmm_supports(TMI_FEATURE_REGISTER_0_SVE_EN); +} + +bool kvm_cvm_supports_pmu(void) +{ + return tmm_supports(TMI_FEATURE_REGISTER_0_PMU_EN); +} + +u32 kvm_cvm_ipa_limit(void) +{ + return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_S2SZ); +} + +u32 kvm_cvm_get_num_brps(void) +{ + return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_BPS); +} + +u32 kvm_cvm_get_num_wrps(void) +{ + return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_WPS); +} + +static int cvm_vmid_reserve(void) +{ + int ret; + unsigned int vmid_count = 1 << kvm_get_vmid_bits(); + + spin_lock(&cvm_vmid_lock); + ret = bitmap_find_free_region(cvm_vmid_bitmap, vmid_count, 0); + spin_unlock(&cvm_vmid_lock); + + return ret; +} + +static void cvm_vmid_release(unsigned int vmid) +{ + spin_lock(&cvm_vmid_lock); + bitmap_release_region(cvm_vmid_bitmap, vmid, 0); + spin_unlock(&cvm_vmid_lock); +} + +static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) +{ + u64 shift = ARM64_HW_PGTABLE_LEVEL_SHIFT(pgt->start_level - 1); + u64 mask = BIT(pgt->ia_bits) - 1; + + return (addr & mask) >> shift; 
+} + +static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level) +{ + struct kvm_pgtable pgt = { + .ia_bits = ia_bits, + .start_level = start_level, + }; + return __kvm_pgd_page_idx(&pgt, -1ULL) + 1; +} + +/* + * the configurable physical numa range in QEMU is 0-127, + * but in real scenarios, 0-63 is sufficient. + */ +static u64 kvm_get_host_numa_set_by_vcpu(u64 vcpu, struct kvm *kvm) +{ + int64_t i; + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct kvm_numa_info *numa_info = &cvm->numa_info; + + for (i = 0; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) { + if (test_bit(vcpu, (unsigned long *)numa_info->numa_nodes[i].cpu_id)) + return numa_info->numa_nodes[i].host_numa_nodes[0]; + } + return NO_NUMA; +} + +static u64 kvm_get_first_binded_numa_set(struct kvm *kvm) +{ + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct kvm_numa_info *numa_info = &cvm->numa_info; + + if (numa_info->numa_cnt > 0) + return numa_info->numa_nodes[0].host_numa_nodes[0]; + return NO_NUMA; +} + +int kvm_arm_create_cvm(struct kvm *kvm) +{ + int ret; + struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; + unsigned int pgd_sz; + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + /* get affine host numa set by default vcpu 0 */ + u64 numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm); + + if (!kvm_is_virtcca_cvm(kvm) || virtcca_cvm_state(kvm) != CVM_STATE_NONE) + return 0; + + if (!cvm->params) { + ret = -EFAULT; + goto out; + } + + ret = cvm_vmid_reserve(); + if (ret < 0) + goto out; + + cvm->cvm_vmid = ret; + + pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level); + + cvm->params->ttt_level_start = kvm->arch.mmu.pgt->start_level; + cvm->params->ttt_num_start = pgd_sz; + cvm->params->s2sz = VTCR_EL2_IPA(kvm->arch.vtcr); + cvm->params->vmid = cvm->cvm_vmid; + cvm->params->ns_vtcr = kvm->arch.vtcr; + cvm->params->vttbr_el2 = kvm->arch.mmu.pgd_phys; + memcpy(cvm->params->rpv, &cvm->cvm_vmid, sizeof(cvm->cvm_vmid)); + cvm->rd = tmi_cvm_create(__pa(cvm->params), numa_set); + if (!cvm->rd) { + kvm_err("KVM creates cVM failed: %d\n", cvm->cvm_vmid); + ret = -ENOMEM; + goto out; + } + + WRITE_ONCE(cvm->state, CVM_STATE_NEW); + ret = 0; +out: + kfree(cvm->params); + cvm->params = NULL; + if (ret < 0) { + kfree(cvm); + kvm->arch.virtcca_cvm = NULL; + } + return ret; +} + +void kvm_destroy_cvm(struct kvm *kvm) +{ + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + uint32_t cvm_vmid; + + if (!cvm) + return; + + cvm_vmid = cvm->cvm_vmid; + kfree(cvm->params); + cvm->params = NULL; + + if (virtcca_cvm_state(kvm) == CVM_STATE_NONE) + return; + + cvm_vmid_release(cvm_vmid); + + WRITE_ONCE(cvm->state, CVM_STATE_DYING); + + if (!tmi_cvm_destroy(cvm->rd)) + kvm_info("KVM has destroyed cVM: %d\n", cvm->cvm_vmid); + + kfree(cvm); + kvm->arch.virtcca_cvm = NULL; +} + +static int kvm_cvm_ttt_create(struct virtcca_cvm *cvm, + unsigned long addr, + int level, + u64 numa_set) +{ + addr = ALIGN_DOWN(addr, cvm_ttt_level_mapsize(level - 1)); + return tmi_ttt_create(numa_set, cvm->rd, addr, level); +} + +int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct virtcca_cvm *cvm, + unsigned long ipa, + int level, + int max_level, + struct kvm_mmu_memory_cache *mc) +{ + int ret = 0; + if (WARN_ON(level == max_level)) + return 0; + + while (level++ < max_level) { + u64 numa_set = kvm_get_first_binded_numa_set(kvm); + + ret = kvm_cvm_ttt_create(cvm, ipa, level, numa_set); + if (ret) + return -ENXIO; + } + + return 0; +} + +static int kvm_cvm_create_protected_data_page(struct kvm *kvm, struct virtcca_cvm *cvm, + unsigned long ipa, int level, 
struct page *src_page, u64 numa_set) +{ + phys_addr_t src_phys = 0; + int ret; + + if (src_page) + src_phys = page_to_phys(src_page); + ret = tmi_data_create(numa_set, cvm->rd, ipa, src_phys, level); + + if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) { + /* Create missing RTTs and retry */ + int level_fault = TMI_RETURN_INDEX(ret); + + ret = kvm_cvm_create_ttt_levels(kvm, cvm, ipa, level_fault, + level, NULL); + if (ret) + goto err; + ret = tmi_data_create(numa_set, cvm->rd, ipa, src_phys, level); + } + if (ret) + goto err; + + return 0; + +err: + kvm_err("Cvm create protected data page fail:%d\n", ret); + return ret; +} + +static u64 cvm_granule_size(u32 level) +{ + return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level)); +} + +static bool is_data_create_region(phys_addr_t ipa_base, + struct kvm_cap_arm_tmm_populate_region_args *args) +{ + if ((ipa_base >= args->populate_ipa_base1 && + ipa_base < args->populate_ipa_base1 + args->populate_ipa_size1) || + (ipa_base >= args->populate_ipa_base2 && + ipa_base < args->populate_ipa_base2 + args->populate_ipa_size2)) + return true; + return false; +} + +int kvm_cvm_populate_par_region(struct kvm *kvm, u64 numa_set, + phys_addr_t ipa_base, phys_addr_t ipa_end, + struct kvm_cap_arm_tmm_populate_region_args *args) +{ + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct kvm_memory_slot *memslot; + gfn_t base_gfn, end_gfn; + int idx; + phys_addr_t ipa; + int ret = 0; + int level = TMM_TTT_LEVEL_3; + unsigned long map_size = cvm_granule_size(level); + + base_gfn = gpa_to_gfn(ipa_base); + end_gfn = gpa_to_gfn(ipa_end); + + idx = srcu_read_lock(&kvm->srcu); + memslot = gfn_to_memslot(kvm, base_gfn); + if (!memslot) { + ret = -EFAULT; + goto out; + } + + /* We require the region to be contained within a single memslot */ + if (memslot->base_gfn + memslot->npages < end_gfn) { + ret = -EINVAL; + goto out; + } + + mmap_read_lock(current->mm); + + ipa = ipa_base; + while (ipa < ipa_end) { + struct page *page = NULL; + kvm_pfn_t pfn = 0; + + /* + * FIXME: This causes over mapping, but there's no good + * solution here with the ABI as it stands + */ + ipa = ALIGN_DOWN(ipa, map_size); + + if (is_data_create_region(ipa, args)) { + pfn = gfn_to_pfn_memslot(memslot, gpa_to_gfn(ipa)); + if (is_error_pfn(pfn)) { + ret = -EFAULT; + break; + } + + page = pfn_to_page(pfn); + } + + ret = kvm_cvm_create_protected_data_page(kvm, cvm, ipa, level, page, numa_set); + if (ret) + goto err_release_pfn; + + ipa += map_size; + if (pfn) + kvm_release_pfn_dirty(pfn); +err_release_pfn: + if (ret) { + if (pfn) + kvm_release_pfn_clean(pfn); + break; + } + } + + mmap_read_unlock(current->mm); +out: + srcu_read_unlock(&kvm->srcu, idx); + return ret; +} + +int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu) +{ + int ret = 0; + int i; + u64 numa_set; + struct tmi_tec_params *params_ptr = NULL; + struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu); + u64 mpidr = kvm_vcpu_get_mpidr_aff(vcpu); + struct virtcca_cvm *cvm = vcpu->kvm->arch.virtcca_cvm; + struct virtcca_cvm_tec *tec = &vcpu->arch.tec; + + mutex_lock(&vcpu->kvm->lock); + tec->tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!tec->tec_run) { + ret = -ENOMEM; + goto tec_free; + } + params_ptr = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!params_ptr) { + ret = -ENOMEM; + goto tec_free; + } + + for (i = 0; i < TEC_CREATE_NR_GPRS; ++i) + params_ptr->gprs[i] = vcpu_regs->regs[i]; + + params_ptr->pc = vcpu_regs->pc; + + if (vcpu->vcpu_id == 0) + params_ptr->flags = TMI_RUNNABLE; + else + params_ptr->flags = TMI_NOT_RUNNABLE; + 
params_ptr->ram_size = cvm->ram_size; + numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm); + tec->tec = tmi_tec_create(numa_set, cvm->rd, mpidr, __pa(params_ptr)); + + tec->tec_created = true; + kfree(params_ptr); + mutex_unlock(&vcpu->kvm->lock); + return ret; + +tec_free: + kfree(tec->tec_run); + kfree(params_ptr); + mutex_unlock(&vcpu->kvm->lock); + return ret; +} + +static int config_cvm_hash_algo(struct tmi_cvm_params *params, + struct kvm_cap_arm_tmm_config_item *cfg) +{ + switch (cfg->hash_algo) { + case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256: + if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_256)) + return -EINVAL; + break; + case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512: + if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_512)) + return -EINVAL; + break; + default: + return -EINVAL; + } + params->measurement_algo = cfg->hash_algo; + return 0; +} + +static int config_cvm_sve(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg) +{ + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct tmi_cvm_params *params; + int max_sve_vq; + + params = cvm->params; + max_sve_vq = u64_get_bits(tmm_feat_reg0, + TMI_FEATURE_REGISTER_0_SVE_VL); + + if (!kvm_cvm_supports_sve()) + return -EINVAL; + + if (cfg->sve_vq > max_sve_vq) + return -EINVAL; + + params->sve_vl = cfg->sve_vq; + params->flags |= TMI_CVM_PARAM_FLAG_SVE; + + return 0; +} + +static int config_cvm_pmu(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg) +{ + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct tmi_cvm_params *params; + int max_pmu_num_ctrs; + + params = cvm->params; + max_pmu_num_ctrs = u64_get_bits(tmm_feat_reg0, + TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS); + + if (!kvm_cvm_supports_pmu()) + return -EINVAL; + + if (cfg->num_pmu_cntrs > max_pmu_num_ctrs) + return -EINVAL; + + params->pmu_num_cnts = cfg->num_pmu_cntrs; + params->flags |= TMI_CVM_PARAM_FLAG_PMU; + + return 0; +} + +static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) +{ + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct kvm_cap_arm_tmm_config_item cfg; + int r = 0; + + if (virtcca_cvm_state(kvm) != CVM_STATE_NONE) + return -EBUSY; + + if (copy_from_user(&cfg, (void __user *)cap->args[1], sizeof(cfg))) + return -EFAULT; + + switch (cfg.cfg) { + case KVM_CAP_ARM_TMM_CFG_SVE: + r = config_cvm_sve(kvm, &cfg); + break; + case KVM_CAP_ARM_TMM_CFG_PMU: + r = config_cvm_pmu(kvm, &cfg); + break; + case KVM_CAP_ARM_TMM_CFG_HASH_ALGO: + r = config_cvm_hash_algo(cvm->params, &cfg); + break; + default: + r = -EINVAL; + } + + return r; +} + +static int kvm_cvm_map_range(struct kvm *kvm) +{ + int ret; + u64 curr_numa_set; + int idx; + u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct kvm_numa_info *numa_info = &cvm->numa_info; + gpa_t gpa; + + curr_numa_set = kvm_get_first_binded_numa_set(kvm); + gpa = round_up(cvm->dtb_end, l2_granule); + for (idx = 0; idx < numa_info->numa_cnt; idx++) { + struct kvm_numa_node *numa_node = &numa_info->numa_nodes[idx]; + + if (idx) + gpa = numa_node->ipa_start; + if (gpa >= numa_node->ipa_start && + gpa < numa_node->ipa_start + numa_node->ipa_size) { + ret = tmi_ttt_map_range(cvm->rd, gpa, + numa_node->ipa_size - gpa + numa_node->ipa_start, + curr_numa_set, numa_node->host_numa_nodes[0]); + if (ret) { + kvm_err("tmi_ttt_map_range failed: %d.\n", ret); + return ret; + } + } + } + + return ret; +} + +static int kvm_activate_cvm(struct kvm *kvm) +{ + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + + if 
(virtcca_cvm_state(kvm) != CVM_STATE_NEW) + return -EINVAL; + + if (kvm_cvm_map_range(kvm)) + return -EFAULT; + + if (tmi_cvm_activate(cvm->rd)) { + kvm_err("tmi_cvm_activate failed!\n"); + return -ENXIO; + } + + WRITE_ONCE(cvm->state, CVM_STATE_ACTIVE); + kvm_info("cVM%d is activated!\n", cvm->cvm_vmid); + return 0; +} + +static int kvm_populate_ram_region(struct kvm *kvm, u64 map_size, + phys_addr_t ipa_base, phys_addr_t ipa_end, + struct kvm_cap_arm_tmm_populate_region_args *args) +{ + phys_addr_t gpa; + u64 numa_set = kvm_get_first_binded_numa_set(kvm); + + for (gpa = ipa_base; gpa < ipa_end; gpa += map_size) { + if (kvm_cvm_populate_par_region(kvm, numa_set, gpa, gpa + map_size, args)) { + kvm_err("kvm_cvm_populate_par_region failed: %d\n", -EFAULT); + return -EFAULT; + } + } + return 0; +} + +static int kvm_populate_ipa_cvm_range(struct kvm *kvm, + struct kvm_cap_arm_tmm_populate_region_args *args) +{ + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); + phys_addr_t ipa_base1, ipa_end2; + + if (virtcca_cvm_state(kvm) != CVM_STATE_NEW) + return -EINVAL; + if (!IS_ALIGNED(args->populate_ipa_base1, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_size1, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_base2, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_size2, PAGE_SIZE)) + return -EINVAL; + + if (args->populate_ipa_base1 < cvm->loader_start || + args->populate_ipa_base2 < args->populate_ipa_base1 + args->populate_ipa_size1 || + cvm->dtb_end < args->populate_ipa_base2 + args->populate_ipa_size2) + return -EINVAL; + + if (args->flags & ~TMI_MEASURE_CONTENT) + return -EINVAL; + ipa_base1 = round_down(args->populate_ipa_base1, l2_granule); + ipa_end2 = round_up(args->populate_ipa_base2 + args->populate_ipa_size2, l2_granule); + + return kvm_populate_ram_region(kvm, l2_granule, ipa_base1, ipa_end2, args); +} + +int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) +{ + int r = 0; + + mutex_lock(&kvm->lock); + switch (cap->args[0]) { + case KVM_CAP_ARM_TMM_CONFIG_CVM_HOST: + r = kvm_tmm_config_cvm(kvm, cap); + break; + case KVM_CAP_ARM_TMM_CREATE_RD: + r = kvm_arm_create_cvm(kvm); + break; + case KVM_CAP_ARM_TMM_POPULATE_CVM: { + struct kvm_cap_arm_tmm_populate_region_args args; + void __user *argp = u64_to_user_ptr(cap->args[1]); + + if (copy_from_user(&args, argp, sizeof(args))) { + r = -EFAULT; + break; + } + r = kvm_populate_ipa_cvm_range(kvm, &args); + break; + } + case KVM_CAP_ARM_TMM_ACTIVATE_CVM: + r = kvm_activate_cvm(kvm); + break; + default: + r = -EINVAL; + break; + } + mutex_unlock(&kvm->lock); + + return r; +} + +void kvm_destroy_tec(struct kvm_vcpu *vcpu) +{ + struct virtcca_cvm_tec *tec = &vcpu->arch.tec; + + if (!vcpu_is_tec(vcpu)) + return; + + if (tmi_tec_destroy(tec->tec) != 0) + kvm_err("%s vcpu id : %d failed!\n", __func__, vcpu->vcpu_id); + + tec->tec = 0; + kfree(tec->tec_run); +} + +static int tmi_check_version(void) +{ + u64 res; + int version_major; + int version_minor; + + res = tmi_version(); + if (res == SMCCC_RET_NOT_SUPPORTED) + return -ENXIO; + + version_major = TMI_ABI_VERSION_GET_MAJOR(res); + version_minor = TMI_ABI_VERSION_GET_MINOR(res); + + if (version_major != TMI_ABI_VERSION_MAJOR) { + kvm_err("Unsupported TMI_ABI (version %d %d)\n", version_major, + version_minor); + return -ENXIO; + } + + kvm_info("TMI ABI version %d,%d\n", version_major, version_minor); + return 0; +} + +int kvm_tec_enter(struct kvm_vcpu *vcpu) +{ + struct tmi_tec_run *run; + struct virtcca_cvm_tec *tec = 
&vcpu->arch.tec; + struct virtcca_cvm *cvm = vcpu->kvm->arch.virtcca_cvm; + + if (READ_ONCE(cvm->state) != CVM_STATE_ACTIVE) + return -EINVAL; + + run = tec->tec_run; + /* set/clear TWI TWE flags */ + if (vcpu->arch.hcr_el2 & HCR_TWI) + run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFI; + else + run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFI; + + if (vcpu->arch.hcr_el2 & HCR_TWE) + run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFE; + else + run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFE; + + return tmi_tec_enter(tec->tec, __pa(run)); +} + +int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target) +{ + int ret; + struct virtcca_cvm_tec *calling_tec = &calling->arch.tec; + struct virtcca_cvm_tec *target_tec = &target->arch.tec; + + ret = tmi_psci_complete(calling_tec->tec, target_tec->tec); + if (ret) + return -EINVAL; + return 0; +} + +int kvm_init_tmm(void) +{ + int ret; + + if (PAGE_SIZE != SZ_4K) + return 0; + + if (tmi_check_version()) + return 0; + + ret = cvm_vmid_init(); + if (ret) + return ret; + + tmm_feat_reg0 = tmi_features(0); + kvm_info("TMM feature0: 0x%lx\n", tmm_feat_reg0); + + static_branch_enable(&virtcca_cvm_is_available); + + return 0; +} + +static bool is_numa_ipa_range_valid(struct kvm_numa_info *numa_info) +{ + unsigned long i; + struct kvm_numa_node *numa_node, *prev_numa_node; + + prev_numa_node = NULL; + for (i = 0; i < numa_info->numa_cnt; i++) { + numa_node = &numa_info->numa_nodes[i]; + if (numa_node->ipa_start + numa_node->ipa_size < numa_node->ipa_start) + return false; + if (prev_numa_node && + numa_node->ipa_start < prev_numa_node->ipa_start + prev_numa_node->ipa_size) + return false; + prev_numa_node = numa_node; + } + if (numa_node->ipa_start + numa_node->ipa_size > CVM_IPA_MAX_VAL) + return false; + return true; +} + +int kvm_load_user_data(struct kvm *kvm, unsigned long arg) +{ + struct kvm_user_data user_data; + void __user *argp = (void __user *)arg; + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct kvm_numa_info *numa_info; + + if (!kvm_is_virtcca_cvm(kvm)) + return -EFAULT; + + if (copy_from_user(&user_data, argp, sizeof(user_data))) + return -EINVAL; + + numa_info = &user_data.numa_info; + if (numa_info->numa_cnt > MAX_NUMA_NODE) + return -EINVAL; + + if (numa_info->numa_cnt > 0) { + unsigned long i, total_size = 0; + struct kvm_numa_node *numa_node = &numa_info->numa_nodes[0]; + unsigned long ipa_end = numa_node->ipa_start + numa_node->ipa_size; + + if (!is_numa_ipa_range_valid(numa_info)) + return -EINVAL; + if (user_data.loader_start < numa_node->ipa_start || + user_data.dtb_end > ipa_end) + return -EINVAL; + for (i = 0; i < numa_info->numa_cnt; i++) + total_size += numa_info->numa_nodes[i].ipa_size; + if (total_size != user_data.ram_size) + return -EINVAL; + } + + if (user_data.image_end <= user_data.loader_start || + user_data.initrd_start < user_data.image_end || + user_data.dtb_end < user_data.initrd_start || + user_data.ram_size < user_data.dtb_end - user_data.loader_start) + return -EINVAL; + + cvm->loader_start = user_data.loader_start; + cvm->image_end = user_data.image_end; + cvm->initrd_start = user_data.initrd_start; + cvm->dtb_end = user_data.dtb_end; + cvm->ram_size = user_data.ram_size; + memcpy(&cvm->numa_info, numa_info, sizeof(struct kvm_numa_info)); + + return 0; +} + +void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu) +{ + kvm_timer_vcpu_put(vcpu); + kvm_vgic_put(vcpu); + vcpu->cpu = -1; +} + +unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu, + unsigned long target_affinity, unsigned long 
lowest_affinity_level) +{ + struct kvm_vcpu *target_vcpu; + + if (lowest_affinity_level != 0) + return PSCI_RET_INVALID_PARAMS; + + target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, target_affinity); + if (!target_vcpu) + return PSCI_RET_INVALID_PARAMS; + + cvm_psci_complete(vcpu, target_vcpu); + return PSCI_RET_SUCCESS; +} + +int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu, + bool serror_pending, bool ext_dabt_pending) +{ + struct virtcca_cvm_tec *tec = &vcpu->arch.tec; + + if (serror_pending) + return -EINVAL; + + if (ext_dabt_pending) { + if (!(((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags & + TEC_ENTRY_FLAG_EMUL_MMIO)) + return -EINVAL; + + ((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags + &= ~TEC_ENTRY_FLAG_EMUL_MMIO; + ((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags + |= TEC_ENTRY_FLAG_INJECT_SEA; + } + return 0; +} + +int kvm_init_cvm_vm(struct kvm *kvm) +{ + struct tmi_cvm_params *params; + struct virtcca_cvm *cvm; + + if (kvm->arch.virtcca_cvm) { + kvm_info("cvm already create.\n"); + return 0; + } + + cvm = (struct virtcca_cvm *)kzalloc(sizeof(struct virtcca_cvm), GFP_KERNEL_ACCOUNT); + if (!cvm) + return -ENOMEM; + + kvm->arch.virtcca_cvm = cvm; + params = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!params) { + kfree(kvm->arch.virtcca_cvm); + kvm->arch.virtcca_cvm = NULL; + return -ENOMEM; + } + + cvm->params = params; + WRITE_ONCE(cvm->state, CVM_STATE_NONE); + + return 0; +} diff --git a/arch/arm64/kvm/virtcca_cvm_exit.c b/arch/arm64/kvm/virtcca_cvm_exit.c new file mode 100644 index 000000000..9654375a9 --- /dev/null +++ b/arch/arm64/kvm/virtcca_cvm_exit.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, The Linux Foundation. All rights reserved. + */ +#include <linux/kvm_host.h> +#include <kvm/arm_hypercalls.h> +#include <kvm/arm_psci.h> + +#include <asm/kvm_tmi.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_mmu.h> + +typedef int (*exit_handler_fn)(struct kvm_vcpu *vcpu); + +static void update_arch_timer_irq_lines(struct kvm_vcpu *vcpu, bool unmask_ctl) +{ + struct tmi_tec_run *run = vcpu->arch.tec.tec_run; + + __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = run->tec_exit.cntv_ctl; + __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = run->tec_exit.cntv_cval; + __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = run->tec_exit.cntp_ctl; + __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = run->tec_exit.cntp_cval; + + /* Because the timer mask is tainted by TMM, we don't know the + * true intent of the guest. Here, we assume mask is always + * cleared during WFI. + */ + if (unmask_ctl) { + __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK; + __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK; + } + + kvm_cvm_timers_update(vcpu); +} + +static int tec_exit_reason_notimpl(struct kvm_vcpu *vcpu) +{ + struct tmi_tec_run *run = vcpu->arch.tec.tec_run; + + pr_err("[vcpu %d] Unhandled exit reason from cvm (ESR: %#llx)\n", + vcpu->vcpu_id, run->tec_exit.esr); + return -ENXIO; +} + +/* The process is the same as kvm_handle_wfx, + * except the tracing and updating operation for pc, + * we copy kvm_handle_wfx process here + * to avoid changing kvm_handle_wfx function. 
+ */ +static int tec_exit_wfx(struct kvm_vcpu *vcpu) +{ + u64 esr = kvm_vcpu_get_esr(vcpu); + + if (esr & ESR_ELx_WFx_ISS_WFE) { + vcpu->stat.wfe_exit_stat++; + } else { + vcpu->stat.wfi_exit_stat++; + } + + if (esr & ESR_ELx_WFx_ISS_WFxT) { + if (esr & ESR_ELx_WFx_ISS_RV) { + u64 val, now; + + now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT); + val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu)); + + if (now >= val) + goto out; + } else { + /* Treat WFxT as WFx if RN is invalid */ + esr &= ~ESR_ELx_WFx_ISS_WFxT; + } + } + + if (esr & ESR_ELx_WFx_ISS_WFE) { + kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu)); + } else { + if (esr & ESR_ELx_WFx_ISS_WFxT) + vcpu_set_flag(vcpu, IN_WFIT); + + kvm_vcpu_wfi(vcpu); + } + +out: + return 1; +} + +static int tec_exit_sys_reg(struct kvm_vcpu *vcpu) +{ + int ret; + struct tmi_tec_run *run = vcpu->arch.tec.tec_run; + unsigned long esr = kvm_vcpu_get_esr(vcpu); + int rt = kvm_vcpu_sys_get_rt(vcpu); + bool is_write = !(esr & 1); + + if (is_write) + vcpu_set_reg(vcpu, rt, run->tec_exit.gprs[0]); + + ret = kvm_handle_sys_reg(vcpu); + + if (ret >= 0 && !is_write) + run->tec_entry.gprs[0] = vcpu_get_reg(vcpu, rt); + + return ret; +} + +static int tec_exit_sync_dabt(struct kvm_vcpu *vcpu) +{ + struct tmi_tec_run *run = vcpu->arch.tec.tec_run; + + if (kvm_vcpu_dabt_iswrite(vcpu) && kvm_vcpu_dabt_isvalid(vcpu)) { + vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), + run->tec_exit.gprs[0]); + } + return kvm_handle_guest_abort(vcpu); +} + +static int tec_exit_sync_iabt(struct kvm_vcpu *vcpu) +{ + struct tmi_tec_run *run = vcpu->arch.tec.tec_run; + + pr_err("[vcpu %d] Unhandled instruction abort (ESR: %#llx).\n", + vcpu->vcpu_id, run->tec_exit.esr); + + return -ENXIO; +} + +static exit_handler_fn tec_exit_handlers[] = { + [0 ... 
ESR_ELx_EC_MAX] = tec_exit_reason_notimpl, + [ESR_ELx_EC_WFx] = tec_exit_wfx, + [ESR_ELx_EC_SYS64] = tec_exit_sys_reg, + [ESR_ELx_EC_DABT_LOW] = tec_exit_sync_dabt, + [ESR_ELx_EC_IABT_LOW] = tec_exit_sync_iabt +}; + +static int tec_exit_psci(struct kvm_vcpu *vcpu) +{ + int i; + struct tmi_tec_run *run = vcpu->arch.tec.tec_run; + + for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) + vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]); + + return kvm_psci_call(vcpu); +} + +static int tec_exit_host_call(struct kvm_vcpu *vcpu) +{ + int ret, i; + struct tmi_tec_run *run = vcpu->arch.tec.tec_run; + + vcpu->stat.hvc_exit_stat++; + + for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) + vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]); + + ret = kvm_smccc_call_handler(vcpu); + + if (ret < 0) { + vcpu_set_reg(vcpu, 0, ~0UL); + ret = 1; + } + for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) + run->tec_entry.gprs[i] = vcpu_get_reg(vcpu, i); + + return ret; +} + +/* + * Return > 0 to return to guest, < 0 on error, 0(and set exit_reason) on + * proper exit to userspace + */ + +int handle_cvm_exit(struct kvm_vcpu *vcpu, int tec_run_ret) +{ + unsigned long status; + struct tmi_tec_run *run = vcpu->arch.tec.tec_run; + u8 esr_ec = ESR_ELx_EC(run->tec_exit.esr); + bool is_wfx; + + status = TMI_RETURN_STATUS(tec_run_ret); + + if (status == TMI_ERROR_CVM_POWEROFF) { + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN; + return 0; + } + + if (status == TMI_ERROR_CVM_STATE) { + vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; + return 0; + } + + if (tec_run_ret) + return -ENXIO; + + vcpu->arch.fault.esr_el2 = run->tec_exit.esr; + vcpu->arch.fault.far_el2 = run->tec_exit.far; + vcpu->arch.fault.hpfar_el2 = run->tec_exit.hpfar; + + is_wfx = (run->tec_exit.exit_reason == TMI_EXIT_SYNC) && (esr_ec == ESR_ELx_EC_WFx); + update_arch_timer_irq_lines(vcpu, is_wfx); + + run->tec_entry.flags = 0; + + switch (run->tec_exit.exit_reason) { + case TMI_EXIT_FIQ: + case TMI_EXIT_IRQ: + return 1; + case TMI_EXIT_PSCI: + return tec_exit_psci(vcpu); + case TMI_EXIT_SYNC: + return tec_exit_handlers[esr_ec](vcpu); + case TMI_EXIT_HOST_CALL: + return tec_exit_host_call(vcpu); + } + + kvm_pr_unimpl("Unsupported exit reason : 0x%llx\n", + run->tec_exit.exit_reason); + return 0; +} diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index fee20f66d..9d3f034bd 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -172,4 +172,8 @@ static inline bool has_cntpoff(void) return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF)); }
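Note: the switch in kvm_cvm_enable_cap() above implies a fixed setup order for the VMM. The fragment below is only an illustrative userspace sketch, not part of the patch: it assumes the KVM_CAP_ARM_TMM_* selector values and struct kvm_cap_arm_tmm_populate_region_args introduced by this patch are visible to (or mirrored by) userspace, omits the payload of the CONFIG_CVM_HOST step, and reduces error handling to early returns.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Issue one KVM_CAP_ARM_TMM sub-operation on a VM fd. */
static int tmm_enable_cap(int vm_fd, uint64_t action, uint64_t arg1)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_ARM_TMM;
	cap.args[0] = action;	/* selects the case in kvm_cvm_enable_cap() */
	cap.args[1] = arg1;	/* optional pointer argument */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

static int setup_cvm(int vm_fd, struct kvm_cap_arm_tmm_populate_region_args *pop)
{
	int ret;

	/* 1. KVM_CAP_ARM_TMM_CONFIG_CVM_HOST comes first (payload not shown here). */

	/* 2. Create the realm descriptor and stage-2 tables through the TMM. */
	ret = tmm_enable_cap(vm_fd, KVM_CAP_ARM_TMM_CREATE_RD, 0);
	if (ret)
		return ret;

	/*
	 * 3. Populate the initial payload while the cVM is still CVM_STATE_NEW.
	 *    Both IPA ranges must be page aligned, start at or after loader_start,
	 *    end no later than dtb_end, and only TMI_MEASURE_CONTENT is a valid flag.
	 */
	ret = tmm_enable_cap(vm_fd, KVM_CAP_ARM_TMM_POPULATE_CVM,
			     (uint64_t)(uintptr_t)pop);
	if (ret)
		return ret;

	/*
	 * 4. Activate: kvm_activate_cvm() maps the RAM ranges and flips the state
	 *    to CVM_STATE_ACTIVE, after which further populate calls fail.
	 */
	return tmm_enable_cap(vm_fd, KVM_CAP_ARM_TMM_ACTIVATE_CVM, 0);
}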
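The return-value convention documented above handle_cvm_exit() composes with kvm_tec_enter() roughly as sketched below. This is not the actual run-loop code (the real wiring lives in the arch/arm64/kvm/arm.c hunk of this patch); cvm_vcpu_run_sketch() is a made-up name, and the prototypes are assumed to come from this patch's asm/kvm_tmi.h.

#include <linux/kvm_host.h>
#include <asm/kvm_tmi.h>

/* Illustration only: the contract between kvm_tec_enter() and handle_cvm_exit(). */
static int cvm_vcpu_run_sketch(struct kvm_vcpu *vcpu)
{
	int ret = 1;

	while (ret > 0) {
		int tec_ret = kvm_tec_enter(vcpu);

		if (tec_ret == -EINVAL)	/* cVM is not CVM_STATE_ACTIVE */
			return tec_ret;

		/*
		 * > 0: re-enter the guest; 0: vcpu->run->exit_reason was set
		 * and we return to userspace; < 0: error.
		 */
		ret = handle_cvm_exit(vcpu, tec_ret);
	}

	return ret;
}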
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+/* Needed for S-EL2 */
+void kvm_cvm_timers_update(struct kvm_vcpu *vcpu);
+#endif
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4557f4735..000ae1ba0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -543,6 +543,28 @@ static __always_inline void guest_state_exit_irqoff(void)
 	instrumentation_end();
 }
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+
+#define KVM_TYPE_CVM_BIT 8
+#define CVM_MAX_HALT_POLL_NS 100000
+
+DECLARE_STATIC_KEY_FALSE(virtcca_cvm_is_available);
+
+static __always_inline bool vcpu_is_tec(struct kvm_vcpu *vcpu)
+{
+	if (static_branch_unlikely(&virtcca_cvm_is_available))
+		return vcpu->arch.tec.tec_run;
+
+	return false;
+}
+
+static inline bool kvm_arm_cvm_type(unsigned long type)
+{
+	return type & (1UL << KVM_TYPE_CVM_BIT);
+}
+
+#endif
+
 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
 {
 	/*
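kvm_arm_cvm_type() above means the VMM opts into a cVM by setting bit 8 of the machine type passed to KVM_CREATE_VM (the check is presumably consulted from the arch/arm64/kvm/arm.c changes in this patch). A minimal userspace sketch; the helper name and the open-coded constant are illustrative, and bits [7:0] keep their usual arm64 meaning (requested IPA size, 0 = default).

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Mirrors KVM_TYPE_CVM_BIT above; illustrative only. */
#define CVM_VM_TYPE	(1UL << 8)

/* kvm_fd is an open file descriptor for /dev/kvm. */
int create_cvm_vm(int kvm_fd)
{
	/* kvm_arm_cvm_type() tests exactly this bit on the kernel side. */
	return ioctl(kvm_fd, KVM_CREATE_VM, CVM_VM_TYPE);
}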
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index cb63d56d8..15012e713 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1509,6 +1509,19 @@ struct kvm_numa_info {
 #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
 #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
 
+#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data)
+
+#define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */
+
+struct kvm_user_data {
+	__u64 loader_start;
+	__u64 image_end;
+	__u64 initrd_start;
+	__u64 dtb_end;
+	__u64 ram_size;
+	struct kvm_numa_info numa_info;
+};
+
 /* enable ucontrol for s390 */
 struct kvm_s390_ucas_mapping {
 	__u64 user_addr;
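The checks in kvm_load_user_data() pin down how struct kvm_user_data must be filled. Below is an illustrative sketch with invented guest-physical addresses; it assumes userspace builds against the headers updated by this patch, and any kvm_numa_info fields this patch does not reference are simply left zeroed.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Constraints encoded in kvm_load_user_data():
 *   loader_start <= image_end <= initrd_start <= dtb_end,
 *   ram_size >= dtb_end - loader_start, and (when numa_cnt > 0) ordered,
 *   non-overlapping NUMA IPA ranges whose sizes sum to ram_size.
 * This ioctl must precede KVM_CAP_ARM_TMM_POPULATE_CVM, which validates its
 * ranges against the loader_start/dtb_end recorded here.
 */
int load_cvm_user_data(int vm_fd)
{
	struct kvm_user_data data;

	memset(&data, 0, sizeof(data));
	data.loader_start = 0x40000000;	/* base of guest RAM */
	data.image_end    = 0x42000000;	/* end of the kernel image */
	data.initrd_start = 0x48000000;
	data.dtb_end      = 0x4a000000;
	data.ram_size     = 0x40000000;	/* 1 GiB >= dtb_end - loader_start */

	data.numa_info.numa_cnt = 1;
	data.numa_info.numa_nodes[0].ipa_start = data.loader_start;
	data.numa_info.numa_nodes[0].ipa_size  = data.ram_size;

	return ioctl(vm_fd, KVM_LOAD_USER_DATA, &data);
}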
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 62f19b4b9..3ccb5f2bc 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3550,6 +3550,10 @@ static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	if (vcpu_is_tec(vcpu))
+		return CVM_MAX_HALT_POLL_NS;
+#endif
 	if (kvm->override_halt_poll_ns) {
 		/*
 		 * Ensure kvm->max_halt_poll_ns is not read before