virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
--------------------------------
Adjust the interfaces to align with the Arm CCA community specification.
Signed-off-by: Ju Fu <fuju1@huawei.com> --- arch/arm64/include/asm/kvm_pgtable.h | 3 + arch/arm64/include/asm/kvm_tmi.h | 44 ++- arch/arm64/include/asm/kvm_tmm.h | 5 +- arch/arm64/include/asm/sysreg.h | 3 + arch/arm64/include/uapi/asm/kvm.h | 4 + arch/arm64/kvm/arm.c | 74 ++-- arch/arm64/kvm/cvm.c | 401 ++++++++++---------- arch/arm64/kvm/reset.c | 12 +- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 3 + 9 files changed, 300 insertions(+), 249 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index f5dff6d40..2068ec591 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -53,6 +53,9 @@ enum kvm_pgtable_prot { KVM_PGTABLE_PROT_PBHA3 = BIT(62), };
+#define TMI_NO_MEASURE_CONTENT U(0) +#define TMI_MEASURE_CONTENT U(1) + #define PAGE_HYP (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W) #define PAGE_HYP_EXEC (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X) #define PAGE_HYP_RO (KVM_PGTABLE_PROT_R) diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h index 232488161..26db4e8b5 100644 --- a/arch/arm64/include/asm/kvm_tmi.h +++ b/arch/arm64/include/asm/kvm_tmi.h @@ -9,12 +9,14 @@ #include <asm/kvm_asm.h> #include <asm/kvm_pgtable.h> #include <linux/virtio_ring.h> +#include <asm/sysreg.h>
#define GRANULE_SIZE 4096
#define NO_NUMA 0 /* every bit in numa set is 0 */
-#define TMM_TTT_LEVEL_3 3 +#define TMM_TTT_LEVEL_2 2 +#define TMM_TTT_LEVEL_3 3
#ifdef CONFIG_CVM_HOST_FVP_PLAT #define CVM_MEM_BASE ULL(0x8800000000) /* choose FVP platform to run cVM */ @@ -35,17 +37,18 @@ /* TMI error codes. */ #define TMI_SUCCESS 0 #define TMI_ERROR_INPUT 1 -#define TMI_ERROR_MEMORY 2 +#define TMI_ERROR_MEMORY 2 #define TMI_ERROR_ALIAS 3 -#define TMI_ERROR_IN_USE 4 -#define TMI_ERROR_CVM_STATE 5 +#define TMI_ERROR_IN_USE 4 +#define TMI_ERROR_CVM_STATE 5 #define TMI_ERROR_OWNER 6 -#define TMI_ERROR_TEC 7 -#define TMI_ERROR_TTT_WALK 8 -#define TMI_ERROR_TTT_ENTRY 9 -#define TMI_ERROR_NOT_SUPPORTED 10 -#define TMI_ERROR_INTERNAL 11 -#define TMI_ERROR_CVM_POWEROFF 12 +#define TMI_ERROR_TEC 7 +#define TMI_ERROR_TTT_WALK 8 +#define TMI_ERROR_TTT_ENTRY 9 +#define TMI_ERROR_NOT_SUPPORTED 10 +#define TMI_ERROR_INTERNAL 11 +#define TMI_ERROR_CVM_POWEROFF 12 +#define TMI_ERROR_TTT_CREATED 13
#define TMI_RETURN_STATUS(ret) ((ret) & 0xFF) #define TMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF) @@ -268,10 +271,9 @@ struct tmi_tec_run {
/* KVM_CAP_ARM_TMM on VM fd */ #define KVM_CAP_ARM_TMM_CONFIG_CVM_HOST 0 -#define KVM_CAP_ARM_TMM_CREATE_CVM 1 -#define KVM_CAP_ARM_TMM_INIT_IPA_CVM 2 -#define KVM_CAP_ARM_TMM_POPULATE_CVM 3 -#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 4 +#define KVM_CAP_ARM_TMM_CREATE_RD 1 +#define KVM_CAP_ARM_TMM_POPULATE_CVM 2 +#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 3
#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0 #define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1 @@ -321,6 +323,14 @@ struct kvm_cap_arm_tmm_config_item { }; };
+#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0) +struct kvm_cap_arm_tmm_populate_region_args { + __u64 populate_ipa_base; + __u64 populate_ipa_size; + __u32 flags; + __u32 reserved[3]; +}; + enum tmi_tmm_mem_type { TMM_MEM_TYPE_RD, TMM_MEM_TYPE_TEC, @@ -342,6 +352,12 @@ static inline bool tmm_is_addr_ttt_level_aligned(uint64_t addr, int level) return (addr & mask) == 0; }
+static inline bool is_armv8_4_sel2_present(void) +{ + return ((read_sysreg(id_aa64pfr0_el1) >> ID_AA64PFR0_SEL2_SHIFT) & + ID_AA64PFR0_SEL2_MASK) == 1UL; +} + u64 phys_to_cvm_phys(u64 phys);
u64 tmi_version(void); diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h index 3452b4429..b828e74e6 100644 --- a/arch/arm64/include/asm/kvm_tmm.h +++ b/arch/arm64/include/asm/kvm_tmm.h @@ -38,18 +38,15 @@ struct cvm_tec {
int kvm_init_tmm(void); int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); -int kvm_init_cvm_vm(struct kvm *kvm); void kvm_destroy_cvm(struct kvm *kvm); -int kvm_create_tec(struct kvm_vcpu *vcpu); +int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu); void kvm_destroy_tec(struct kvm_vcpu *vcpu); int kvm_tec_enter(struct kvm_vcpu *vcpu); int handle_cvm_exit(struct kvm_vcpu *vcpu, int rec_run_status); int kvm_arm_create_cvm(struct kvm *kvm); void kvm_free_rd(struct kvm *kvm); int cvm_create_rd(struct kvm *kvm); -int kvm_arm_cvm_first_run(struct kvm_vcpu *vcpu); int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target); -int kvm_arch_tec_init(struct kvm_vcpu *vcpu);
void kvm_cvm_unmap_destroy_range(struct kvm *kvm);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 8bcc9ac99..9b118a4b9 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1326,6 +1326,9 @@ #define ID_AA64PFR0_EL2_SHIFT 8 #define ID_AA64PFR0_EL1_SHIFT 4 #define ID_AA64PFR0_EL0_SHIFT 0 +#ifdef CONFIG_CVM_HOST /* TODO: review SEL2 field definitions consistently */ +#define ID_AA64PFR0_SEL2_MASK ULL(0xf) +#endif
#define ID_AA64PFR0_MPAM 0x1 #define ID_AA64PFR0_AMU 0x1 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 531ff62e8..a53b16798 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -106,6 +106,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ +#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */
struct kvm_vcpu_init { __u32 target; @@ -354,6 +355,9 @@ struct kvm_vcpu_events { #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 #define KVM_DEV_ARM_ITS_CTRL_RESET 4
+#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256 0 +#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512 1 + /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 #define KVM_ARM_VCPU_PMU_V3_IRQ 0 diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 5372a53a6..dc55b64d1 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -145,6 +145,37 @@ static void set_default_csv2(struct kvm *kvm) kvm->arch.pfr0_csv2 = 1; }
+static int kvm_create_cvm_vm(struct kvm *kvm) +{ + if (!static_key_enabled(&kvm_cvm_is_available)) + return -EFAULT; + + if (kvm->arch.cvm) { + kvm_info("cvm already create.\n"); + return 0; + } + + kvm->arch.cvm = kzalloc(sizeof(struct cvm), GFP_KERNEL_ACCOUNT); + if (!kvm->arch.cvm) + return -ENOMEM; + + return 0; +} + +static int kvm_init_cvm_vm(struct kvm *kvm) +{ + struct tmi_cvm_params *params; + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + + params = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!params) + return -ENOMEM; + + cvm->params = params; + + return 0; +} + /** * kvm_arch_init_vm - initializes a VM data structure * @kvm: pointer to the KVM struct @@ -160,28 +191,25 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) #endif
#ifdef CONFIG_CVM_HOST + ret = kvm_create_cvm_vm(kvm); + if (ret) + return ret; if (kvm_arm_cvm_type(type)) { - ret = cvm_create_rd(kvm); - if (ret) - return ret; + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + + cvm->is_cvm = true; + if (!kvm_is_cvm(kvm)) + return -EINVAL; } #endif
ret = kvm_arm_setup_stage2(kvm, type); if (ret) -#ifdef CONFIG_CVM_HOST - goto out_free_rd; -#else return ret; -#endif
ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu); if (ret) -#ifdef CONFIG_CVM_HOST - goto out_free_rd; -#else return ret; -#endif
ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP); if (ret) @@ -193,6 +221,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
set_default_csv2(kvm); + #ifdef CONFIG_CVM_HOST if (kvm_arm_cvm_type(type)) { ret = kvm_init_cvm_vm(kvm); @@ -204,10 +233,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) return ret; out_free_stage2_pgd: kvm_free_stage2_pgd(&kvm->arch.mmu); -#ifdef CONFIG_CVM_HOST -out_free_rd: - kvm_free_rd(kvm); -#endif return ret; }
@@ -311,6 +336,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; #ifdef CONFIG_CVM_HOST case KVM_CAP_ARM_TMM: + if (!is_armv8_4_sel2_present()) { + r = -ENXIO; + break; + } r = static_key_enabled(&kvm_cvm_is_available); break; #endif @@ -408,14 +437,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (err) return err; #endif - -#ifdef CONFIG_CVM_HOST - if (kvm_is_cvm(vcpu->kvm)) { - err = kvm_arch_tec_init(vcpu); - if (err) - return err; - } -#endif return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP); }
@@ -924,13 +945,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ret = kvm_vcpu_first_run_init(vcpu); if (ret) return ret; -#ifdef CONFIG_CVM_HOST - if (kvm_is_cvm(vcpu->kvm)) { - ret = kvm_arm_cvm_first_run(vcpu); - if (ret) - return ret; - } -#endif if (run->exit_reason == KVM_EXIT_MMIO) { ret = kvm_handle_mmio_return(vcpu); if (ret) diff --git a/arch/arm64/kvm/cvm.c b/arch/arm64/kvm/cvm.c index c3083b659..5c0390f88 100644 --- a/arch/arm64/kvm/cvm.c +++ b/arch/arm64/kvm/cvm.c @@ -162,14 +162,21 @@ int kvm_arm_create_cvm(struct kvm *kvm) struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; unsigned int pgd_sz; struct cvm *cvm = (struct cvm *)kvm->arch.cvm; - u64 numa_set; + /* get affine host numa set by default vcpu 0 */ + u64 numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm);
if (!kvm_is_cvm(kvm) || kvm_cvm_state(kvm) != CVM_STATE_NONE) return 0;
+ cvm->rd = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX); + if (cvm->rd == 0) { + kvm_err("tmi_mem_alloc for cvm rd failed: %d\n", cvm->cvm_vmid); + return -ENOMEM; + } + ret = cvm_vmid_reserve(); if (ret < 0) - return ret; + goto out;
cvm->cvm_vmid = ret;
@@ -186,12 +193,17 @@ int kvm_arm_create_cvm(struct kvm *kvm) numa_set = kvm_get_first_binded_numa_set(kvm); ret = tmi_cvm_create(cvm->rd, __pa(cvm->params), numa_set); if (!ret) - kvm_info("KVM creates cVM: %d\n", cvm->cvm_vmid); + kvm_err("KVM creates cVM: %d\n", cvm->cvm_vmid);
WRITE_ONCE(cvm->state, CVM_STATE_NEW); kfree(cvm->params); cvm->params = NULL; return ret; +out: + kfree(cvm->params); + cvm->params = NULL; + kvm_free_rd(kvm); + return ret; }
int cvm_create_rd(struct kvm *kvm) @@ -199,18 +211,6 @@ int cvm_create_rd(struct kvm *kvm) struct cvm *cvm; u64 numa_set;
- if (!static_key_enabled(&kvm_cvm_is_available)) - return -EFAULT; - - if (kvm->arch.cvm) { - kvm_err("cvm already create.\n"); - return -EFAULT; - } - - kvm->arch.cvm = kzalloc(sizeof(struct cvm), GFP_KERNEL_ACCOUNT); - if (!kvm->arch.cvm) - return -ENOMEM; - /* get affine host numa set by default vcpu 0 */ numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm); cvm = (struct cvm *)kvm->arch.cvm; @@ -284,6 +284,7 @@ int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct cvm *cvm, int max_level, struct kvm_mmu_memory_cache *mc) { + int ret = 0; if (WARN_ON(level == max_level)) return 0;
@@ -296,7 +297,12 @@ int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct cvm *cvm, if (ttt == 0) return -ENOMEM;
- if (kvm_cvm_ttt_create(cvm, ipa, level, ttt)) { + ret = kvm_cvm_ttt_create(cvm, ipa, level, ttt); + if (ret == TMI_ERROR_TTT_CREATED) { + ret = 0; + (void)tmi_mem_free(ttt, numa_set, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX); + continue; + } else if (ret) { (void)tmi_mem_free(ttt, numa_set, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX); return -ENXIO; } @@ -314,6 +320,10 @@ static int kvm_cvm_create_protected_data_page(struct kvm *kvm, struct cvm *cvm,
src_phys = page_to_phys(src_page); ret = tmi_data_create(dst_phys, cvm->rd, ipa, src_phys, level); + if (ret == TMI_ERROR_TTT_CREATED) { + ret = 0; + return ret; + } if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) { /* Create missing RTTs and retry */ int level_fault = TMI_RETURN_INDEX(ret); @@ -412,72 +422,34 @@ int kvm_cvm_populate_par_region(struct kvm *kvm, return ret; }
-static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu) +static int kvm_arch_tec_init(struct kvm_vcpu *vcpu) { - int ret = 0; - gpa_t gpa, gpa_data_end, gpa_end, data_size; - u64 i, map_size, dst_phys, numa_set; - u64 l2_granule = cvm_granule_size(2); /* 2MB */ + int ret = -ENOMEM; + u64 numa_set; + struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec; struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm; - struct kvm_numa_info *numa_info; - - /* 2MB alignment below addresses*/ - gpa = cvm->loader_start; - gpa_end = cvm->loader_start + cvm->ram_size; - data_size = cvm->initrd_start - cvm->loader_start + - cvm->initrd_size; - data_size = round_up(data_size, l2_granule); - gpa_data_end = cvm->loader_start + data_size + l2_granule; - gpa = round_down(gpa, l2_granule); - gpa_end = round_up(gpa_end, l2_granule); - gpa_data_end = round_up(gpa_data_end, l2_granule); - numa_info = &cvm->numa_info; - - numa_set = kvm_get_first_binded_numa_set(vcpu->kvm); - map_size = l2_granule; - do { - dst_phys = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_CVM_PA, map_size); - if (!dst_phys) { - ret = -ENOMEM; - kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__); - goto out; - }
- ret = kvm_cvm_populate_par_region(vcpu->kvm, gpa, gpa + map_size, dst_phys); - if (ret) { - kvm_err("kvm_cvm_populate_par_region fail:%d.\n", ret); - goto out; - } - gpa += map_size; - } while (gpa < gpa_data_end); + tec->tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!tec->tec_run) + goto tec_free;
- if (numa_info->numa_cnt > 0) - gpa_end = numa_info->numa_nodes[0].ipa_start + numa_info->numa_nodes[0].ipa_size; - /* Map gpa range to secure mem without copy data from host. - * The cvm gpa map pages will free by destroy cvm. - */ - ret = tmi_ttt_map_range(cvm->rd, gpa_data_end, - gpa_end - gpa_data_end, numa_set, numa_set); - if (ret) { - kvm_err("tmi_ttt_map_range fail:%d.\n", ret); - goto out; + numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm); + tec->tec = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX); + if (!tec->tec) { + kvm_err("KVM tmi_mem_alloc failed:%d\n", vcpu->vcpu_id); + goto tec_free; } + kvm_info("KVM inits cVM VCPU:%d\n", vcpu->vcpu_id);
- for (i = 1; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) { - struct kvm_numa_node *numa_node = &numa_info->numa_nodes[i]; - - ret = tmi_ttt_map_range(cvm->rd, numa_node->ipa_start, - numa_node->ipa_size, numa_set, numa_node->host_numa_nodes[0]); - if (ret) { - kvm_err("tmi_ttt_map_range fail:%d.\n", ret); - goto out; - } - } -out: + return 0; +tec_free: + kfree(tec->tec_run); + kfree(tec); + vcpu->arch.tec = NULL; return ret; }
-int kvm_create_tec(struct kvm_vcpu *vcpu) +static int kvm_create_tec(struct kvm_vcpu *vcpu) { int ret; int i; @@ -503,38 +475,51 @@ int kvm_create_tec(struct kvm_vcpu *vcpu) params_ptr->ram_size = cvm->ram_size; ret = tmi_tec_create(tec->tec, cvm->rd, mpidr, __pa(params_ptr));
+ tec->tec_created = true; kfree(params_ptr);
return ret; }
-static int kvm_create_all_tecs(struct kvm *kvm) +int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu) { int ret = 0; - struct kvm_vcpu *vcpu; - unsigned long i; - struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
- if (READ_ONCE(cvm->state) == CVM_STATE_ACTIVE) - return -1; + if (!vcpu->arch.tec) { + vcpu->arch.tec = kzalloc(sizeof(struct cvm_tec), GFP_KERNEL_ACCOUNT); + if (!vcpu->arch.tec) + return -ENOMEM; + }
- mutex_lock(&kvm->lock); - kvm_for_each_vcpu(i, vcpu, kvm) { - struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec; + ret = kvm_arch_tec_init(vcpu); + if (ret) + return ret; + ret = kvm_create_tec(vcpu); + if (ret) + return ret;
- if (!tec->tec_created) { - ret = kvm_create_tec(vcpu); - if (ret) { - mutex_unlock(&kvm->lock); - return ret; - } - tec->tec_created = true; - } - } - mutex_unlock(&kvm->lock); return ret; }
+static int config_cvm_hash_algo(struct tmi_cvm_params *params, + struct kvm_cap_arm_tmm_config_item *cfg) +{ + switch (cfg->hash_algo) { + case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256: + if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_256)) + return -EINVAL; + break; + case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512: + if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_512)) + return -EINVAL; + break; + default: + return -EINVAL; + } + params->measurement_algo = cfg->hash_algo; + return 0; +} + static int config_cvm_sve(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg) { struct cvm *cvm = (struct cvm *)kvm->arch.cvm; @@ -581,6 +566,7 @@ static int config_cvm_pmu(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *c
static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) { + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; struct kvm_cap_arm_tmm_config_item cfg; int r = 0;
@@ -597,6 +583,9 @@ static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) case KVM_CAP_ARM_TMM_CFG_PMU: r = config_cvm_pmu(kvm, &cfg); break; + case KVM_CAP_ARM_TMM_CFG_HASH_ALGO: + r = config_cvm_hash_algo(cvm->params, &cfg); + break; default: r = -EINVAL; } @@ -604,6 +593,106 @@ static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) return r; }
+static int kvm_cvm_map_range(struct kvm *kvm) +{ + int ret; + u64 curr_numa_set; + int idx; + u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + struct kvm_numa_info *numa_info = &cvm->numa_info; + gpa_t gpa, gpa_data_end, data_size; + + data_size = cvm->initrd_start - cvm->loader_start + cvm->initrd_size; + data_size = round_up(data_size, l2_granule); + gpa_data_end = cvm->loader_start + data_size + l2_granule; + gpa_data_end = round_up(gpa_data_end, l2_granule); + + curr_numa_set = kvm_get_first_binded_numa_set(kvm); + + gpa = gpa_data_end; + for (idx = 0; idx < numa_info->numa_cnt; idx++) { + struct kvm_numa_node *numa_node = &numa_info->numa_nodes[idx]; + + if (idx) + gpa = numa_node->ipa_start; + if (gpa >= numa_node->ipa_start && + gpa < numa_node->ipa_start + numa_node->ipa_size) { + ret = tmi_ttt_map_range(cvm->rd, gpa, + numa_node->ipa_size - gpa + numa_node->ipa_start, + curr_numa_set, numa_node->host_numa_nodes[0]); + if (ret) { + kvm_err("tmi_ttt_map_range failed: %d.\n", ret); + return ret; + } + } + } + + return ret; +} + +static int kvm_activate_cvm(struct kvm *kvm) +{ + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + + if (kvm_cvm_state(kvm) != CVM_STATE_NEW) + return -EINVAL; + + if (kvm_cvm_map_range(kvm)) + return -EFAULT; + + if (tmi_cvm_activate(cvm->rd)) { + kvm_err("tmi_cvm_activate failed!\n"); + return -ENXIO; + } + + WRITE_ONCE(cvm->state, CVM_STATE_ACTIVE); + kvm_info("cVM%d is activated!\n", cvm->cvm_vmid); + return 0; +} + +static int kvm_populate_ipa_cvm_range(struct kvm *kvm, + struct kvm_cap_arm_tmm_populate_region_args *args) +{ + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); + phys_addr_t ipa_base, ipa_end, gpa; + u64 map_size, dst_phys; + u64 numa_set; + + if (kvm_cvm_state(kvm) != CVM_STATE_NEW) + return -EINVAL; + if (!IS_ALIGNED(args->populate_ipa_base, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_size, PAGE_SIZE)) + 
return -EINVAL; + + if (args->flags & ~TMI_MEASURE_CONTENT) + return -EINVAL; + ipa_base = round_down(args->populate_ipa_base, l2_granule); + ipa_end = round_up(args->populate_ipa_base + + args->populate_ipa_size + l2_granule, l2_granule); + + if (ipa_end < ipa_base) + return -EINVAL; + + numa_set = kvm_get_first_binded_numa_set(kvm); + map_size = l2_granule; + for (gpa = ipa_base; gpa < ipa_end; gpa += map_size) { + dst_phys = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_CVM_PA, map_size); + if (!dst_phys) { + kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__); + return -ENOMEM; + } + + if (kvm_cvm_populate_par_region(kvm, gpa, gpa + map_size, dst_phys)) { + kvm_err("kvm_cvm_populate_par_region failed: %d\n", -EFAULT); + return -EFAULT; + } + } + + return 0; +} + int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { int r = 0; @@ -613,9 +702,23 @@ int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) case KVM_CAP_ARM_TMM_CONFIG_CVM_HOST: r = kvm_tmm_config_cvm(kvm, cap); break; - case KVM_CAP_ARM_TMM_CREATE_CVM: + case KVM_CAP_ARM_TMM_CREATE_RD: r = kvm_arm_create_cvm(kvm); break; + case KVM_CAP_ARM_TMM_POPULATE_CVM: { + struct kvm_cap_arm_tmm_populate_region_args args; + void __user *argp = u64_to_user_ptr(cap->args[1]); + + if (copy_from_user(&args, argp, sizeof(args))) { + r = -EFAULT; + break; + } + r = kvm_populate_ipa_cvm_range(kvm, &args); + break; + } + case KVM_CAP_ARM_TMM_ACTIVATE_CVM: + r = kvm_activate_cvm(kvm); + break; default: r = -EINVAL; break; @@ -670,63 +773,6 @@ static int tmi_check_version(void) return 0; }
-static int kvm_kick_boot_vcpu(struct kvm *kvm) -{ - struct kvm_vcpu *vcpu; - unsigned long i; - struct cvm *cvm = (struct cvm *)kvm->arch.cvm; - - if (READ_ONCE(cvm->state) == CVM_STATE_ACTIVE) - return 0; - - mutex_lock(&kvm->lock); - kvm_for_each_vcpu(i, vcpu, kvm) { - if (i == 0) - kvm_vcpu_kick(vcpu); - } - mutex_unlock(&kvm->lock); - return 0; -} - -int kvm_arm_cvm_first_run(struct kvm_vcpu *vcpu) -{ - int ret = 0; - struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm; - - if (READ_ONCE(cvm->state) == CVM_STATE_ACTIVE) - return ret; - - if (vcpu->vcpu_id == 0) { - ret = kvm_create_all_tecs(vcpu->kvm); - if (ret != 0) - return ret; - } else { - kvm_kick_boot_vcpu(vcpu->kvm); - } - - mutex_lock(&vcpu->kvm->lock); - - if (vcpu->vcpu_id == 0) { - ret = kvm_sel2_map_protected_ipa(vcpu); - if (ret) { - kvm_err("Map protected ipa failed!\n"); - goto unlock_exit; - } - ret = tmi_cvm_activate(cvm->rd); - if (ret) { - kvm_err("tmi_cvm_activate failed!\n"); - goto unlock_exit; - } - - WRITE_ONCE(cvm->state, CVM_STATE_ACTIVE); - kvm_info("cVM%d is activated!\n", cvm->cvm_vmid); - } -unlock_exit: - mutex_unlock(&vcpu->kvm->lock); - - return ret; -} - int kvm_tec_enter(struct kvm_vcpu *vcpu) { struct tmi_tec_run *run; @@ -763,42 +809,6 @@ int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target) return 0; }
-int kvm_arch_tec_init(struct kvm_vcpu *vcpu) -{ - int ret = -ENOMEM; - struct cvm_tec *tec; - struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm; - u64 numa_set; - - if (vcpu->arch.tec) { - kvm_err("tec already create.\n"); - return -EFAULT; - } - vcpu->arch.tec = kzalloc(sizeof(struct cvm_tec), GFP_KERNEL_ACCOUNT); - if (!vcpu->arch.tec) - return -ENOMEM; - - tec = (struct cvm_tec *)vcpu->arch.tec; - tec->tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - if (!tec->tec_run) - goto tec_free; - - numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm); - tec->tec = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX); - if (!tec->tec) { - kvm_err("KVM tmi_mem_alloc failed:%d\n", vcpu->vcpu_id); - goto tec_free; - } - kvm_info("KVM inits cVM VCPU:%d\n", vcpu->vcpu_id); - - return 0; -tec_free: - kfree(tec->tec_run); - kfree(tec); - vcpu->arch.tec = NULL; - return ret; -} - int kvm_init_tmm(void) { int ret; @@ -821,20 +831,6 @@ int kvm_init_tmm(void) return 0; }
-int kvm_init_cvm_vm(struct kvm *kvm) -{ - struct tmi_cvm_params *params; - struct cvm *cvm = (struct cvm *)kvm->arch.cvm; - - params = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - if (!params) - return -ENOMEM; - - cvm->params = params; - - return 0; -} - int kvm_load_user_data(struct kvm *kvm, unsigned long arg) { struct kvm_user_data user_data; @@ -866,6 +862,11 @@ int kvm_load_user_data(struct kvm *kvm, unsigned long arg) return -EFAULT; }
+ kvm_err("%s:(start: 0x%llx, size: 0x%llx, ram: 0x%llx)\n", __func__, + user_data.loader_start, + user_data.initrd_start - user_data.loader_start + user_data.initrd_size, + user_data.ram_size); + cvm->loader_start = user_data.loader_start; cvm->initrd_start = user_data.initrd_start; cvm->initrd_size = user_data.initrd_size; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index bb177d58c..3d0a3f2cc 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -177,6 +177,8 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) { + int ret = 0; + switch (feature) { case KVM_ARM_VCPU_SVE: if (!vcpu_has_sve(vcpu)) @@ -186,9 +188,17 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) return -EPERM;
return kvm_vcpu_finalize_sve(vcpu); +#ifdef CONFIG_CVM_HOST + case KVM_ARM_VCPU_TEC: + if (!kvm_is_cvm(vcpu->kvm)) + return -EINVAL; + mutex_lock(&vcpu->kvm->lock); + ret = kvm_finalize_vcpu_tec(vcpu); + mutex_unlock(&vcpu->kvm->lock); +#endif }
- return -EINVAL; + return ret; }
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 776d326de..53ed1453e 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -13,6 +13,9 @@ #include <linux/kernel.h> #include <linux/mmzone.h> #include <linux/sizes.h> +#ifdef CONFIG_CVM_HOST +#include <linux/kvm_host.h> +#endif
/* MMIO registers */ #define ARM_SMMU_IDR0 0x0