adjusting the interface to adapt to the cca community

cvm: enable secure memory alloc on multiple numa nodes
cvm: support observable usage of secure memory
cvm: adjusting the interface to adapt to the cca community
cvm: fix data_create ram region
 arch/arm64/include/asm/kvm_pgtable.h        |   3 +
 arch/arm64/include/asm/kvm_tmi.h            |  59 ++-
 arch/arm64/include/asm/kvm_tmm.h            |   8 +-
 arch/arm64/include/asm/sysreg.h             |   3 +
 arch/arm64/include/uapi/asm/kvm.h           |   4 +
 arch/arm64/kvm/arm.c                        |  74 +--
 arch/arm64/kvm/cvm.c                        | 476 +++++++++++---------
 arch/arm64/kvm/reset.c                      |  12 +-
 arch/arm64/kvm/tmi.c                        |  23 +-
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h |   3 +
 include/uapi/linux/kvm.h                    |   6 +-
 11 files changed, 390 insertions(+), 281 deletions(-)
virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
--------------------------------
enable cvm secure memory alloc on multiple numa nodes
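The allocation and free paths now take a host NUMA node bitmap (numa_set) instead of a single node id, and NO_NUMA becomes 0, meaning no bit is set. A minimal sketch of the bitmap convention assumed here (the node numbers are illustrative only, not part of the patch):

	u64 numa_set = NO_NUMA;

	/* bind the allocation to host NUMA nodes 0 and 2 */
	set_bit(0, (unsigned long *)&numa_set);
	set_bit(2, (unsigned long *)&numa_set);

	/* numa_set == 0x5 is then handed to tmi_mem_alloc()/tmi_cvm_create() */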
Signed-off-by: Ju Fu <fuju1@huawei.com>
---
 arch/arm64/include/asm/kvm_tmi.h |  8 ++--
 arch/arm64/kvm/cvm.c             | 80 +++++++++++++++++++++-----------
 arch/arm64/kvm/tmi.c             | 12 ++---
 include/uapi/linux/kvm.h         |  3 +-
 4 files changed, 66 insertions(+), 37 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h index 1bba7c7cd..670955518 100644 --- a/arch/arm64/include/asm/kvm_tmi.h +++ b/arch/arm64/include/asm/kvm_tmi.h @@ -12,7 +12,7 @@
#define GRANULE_SIZE 4096
-#define NO_NUMA -1 +#define NO_NUMA 0 /* every bit in numa set is 0 */
#define TMM_TTT_LEVEL_3 3
@@ -346,7 +346,7 @@ u64 tmi_version(void); u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level); u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level); u64 tmi_cvm_activate(u64 rd); -u64 tmi_cvm_create(u64 rd, u64 params_ptr); +u64 tmi_cvm_create(u64 rd, u64 params_ptr, u64 numa_set); u64 tmi_cvm_destroy(u64 rd); u64 tmi_tec_create(u64 tec, u64 rd, u64 mpidr, u64 params_ptr); u64 tmi_tec_destroy(u64 tec); @@ -361,9 +361,9 @@ u64 tmi_features(u64 index); u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node); u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id);
-u64 tmi_mem_alloc(u64 rd, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type, +u64 tmi_mem_alloc(u64 rd, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type, enum tmi_tmm_map_size tmm_map_size); -u64 tmi_mem_free(u64 pa, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type, +u64 tmi_mem_free(u64 pa, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type, enum tmi_tmm_map_size tmm_map_size);
void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/cvm.c b/arch/arm64/kvm/cvm.c index 2b58ebf72..c3083b659 100644 --- a/arch/arm64/kvm/cvm.c +++ b/arch/arm64/kvm/cvm.c @@ -129,12 +129,40 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level) return __kvm_pgd_page_idx(&pgt, -1ULL) + 1; }
+/* + * the configurable physical numa range in QEMU is 0-127, + * but in real scenarios, 0-63 is sufficient. + */ +static u64 kvm_get_host_numa_set_by_vcpu(u64 vcpu, struct kvm *kvm) +{ + int64_t i; + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + struct kvm_numa_info *numa_info = &cvm->numa_info; + + for (i = 0; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) { + if (test_bit(vcpu, (unsigned long *)numa_info->numa_nodes[i].cpu_id)) + return numa_info->numa_nodes[i].host_numa_nodes[0]; + } + return NO_NUMA; +} + +static u64 kvm_get_first_binded_numa_set(struct kvm *kvm) +{ + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + struct kvm_numa_info *numa_info = &cvm->numa_info; + + if (numa_info->numa_cnt > 0) + return numa_info->numa_nodes[0].host_numa_nodes[0]; + return NO_NUMA; +} + int kvm_arm_create_cvm(struct kvm *kvm) { int ret; struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; unsigned int pgd_sz; struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + u64 numa_set;
if (!kvm_is_cvm(kvm) || kvm_cvm_state(kvm) != CVM_STATE_NONE) return 0; @@ -155,7 +183,8 @@ int kvm_arm_create_cvm(struct kvm *kvm) cvm->params->vmid = cvm->cvm_vmid; cvm->params->ns_vtcr = kvm->arch.vtcr; cvm->params->vttbr_el2 = kvm->arch.mmu.pgd_phys; - ret = tmi_cvm_create(cvm->rd, __pa(cvm->params)); + numa_set = kvm_get_first_binded_numa_set(kvm); + ret = tmi_cvm_create(cvm->rd, __pa(cvm->params), numa_set); if (!ret) kvm_info("KVM creates cVM: %d\n", cvm->cvm_vmid);
@@ -168,6 +197,7 @@ int kvm_arm_create_cvm(struct kvm *kvm) int cvm_create_rd(struct kvm *kvm) { struct cvm *cvm; + u64 numa_set;
if (!static_key_enabled(&kvm_cvm_is_available)) return -EFAULT; @@ -181,8 +211,10 @@ int cvm_create_rd(struct kvm *kvm) if (!kvm->arch.cvm) return -ENOMEM;
+ /* get affine host numa set by default vcpu 0 */ + numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm); cvm = (struct cvm *)kvm->arch.cvm; - cvm->rd = tmi_mem_alloc(cvm->rd, NO_NUMA, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX); + cvm->rd = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX); if (!cvm->rd) { kfree(kvm->arch.cvm); kvm->arch.cvm = NULL; @@ -197,11 +229,13 @@ void kvm_free_rd(struct kvm *kvm) { int ret; struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + u64 numa_set;
if (!cvm->rd) return;
- ret = tmi_mem_free(cvm->rd, NO_NUMA, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX); + numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm); + ret = tmi_mem_free(cvm->rd, numa_set, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX); if (ret) kvm_err("tmi_mem_free for cvm rd failed: %d\n", cvm->cvm_vmid); else @@ -255,14 +289,15 @@ int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct cvm *cvm,
while (level++ < max_level) { phys_addr_t ttt; + u64 numa_set = kvm_get_first_binded_numa_set(kvm);
- ttt = tmi_mem_alloc(cvm->rd, NO_NUMA, + ttt = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX); if (ttt == 0) return -ENOMEM;
if (kvm_cvm_ttt_create(cvm, ipa, level, ttt)) { - (void)tmi_mem_free(ttt, NO_NUMA, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX); + (void)tmi_mem_free(ttt, numa_set, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX); return -ENXIO; } } @@ -381,10 +416,8 @@ static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu) { int ret = 0; gpa_t gpa, gpa_data_end, gpa_end, data_size; - u64 i, map_size, dst_phys; + u64 i, map_size, dst_phys, numa_set; u64 l2_granule = cvm_granule_size(2); /* 2MB */ - u64 numa_id = NO_NUMA; - int cur_numa_id; struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm; struct kvm_numa_info *numa_info;
@@ -400,12 +433,10 @@ static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu) gpa_data_end = round_up(gpa_data_end, l2_granule); numa_info = &cvm->numa_info;
- /* get the first binded numa id */ - if (numa_info->numa_cnt > 0) - numa_id = numa_info->numa_nodes[0].host_numa_node; + numa_set = kvm_get_first_binded_numa_set(vcpu->kvm); map_size = l2_granule; do { - dst_phys = tmi_mem_alloc(cvm->rd, numa_id, TMM_MEM_TYPE_CVM_PA, map_size); + dst_phys = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_CVM_PA, map_size); if (!dst_phys) { ret = -ENOMEM; kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__); @@ -420,30 +451,23 @@ static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu) gpa += map_size; } while (gpa < gpa_data_end);
- cur_numa_id = numa_node_id(); - if (cur_numa_id < 0) { - ret = -EFAULT; - kvm_err("get current numa node fail\n"); - goto out; - } - if (numa_info->numa_cnt > 0) gpa_end = numa_info->numa_nodes[0].ipa_start + numa_info->numa_nodes[0].ipa_size; /* Map gpa range to secure mem without copy data from host. * The cvm gpa map pages will free by destroy cvm. */ ret = tmi_ttt_map_range(cvm->rd, gpa_data_end, - gpa_end - gpa_data_end, cur_numa_id, numa_id); + gpa_end - gpa_data_end, numa_set, numa_set); if (ret) { kvm_err("tmi_ttt_map_range fail:%d.\n", ret); goto out; }
- for (i = 1; i < numa_info->numa_cnt; i++) { + for (i = 1; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) { struct kvm_numa_node *numa_node = &numa_info->numa_nodes[i];
ret = tmi_ttt_map_range(cvm->rd, numa_node->ipa_start, - numa_node->ipa_size, cur_numa_id, numa_node->host_numa_node); + numa_node->ipa_size, numa_set, numa_node->host_numa_nodes[0]); if (ret) { kvm_err("tmi_ttt_map_range fail:%d.\n", ret); goto out; @@ -459,7 +483,7 @@ int kvm_create_tec(struct kvm_vcpu *vcpu) int i; struct tmi_tec_params *params_ptr; struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu); - uint64_t mpidr = kvm_vcpu_get_mpidr_aff(vcpu); + u64 mpidr = kvm_vcpu_get_mpidr_aff(vcpu); struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm; struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
@@ -605,6 +629,7 @@ void kvm_destroy_tec(struct kvm_vcpu *vcpu) { int ret = 0; struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec; + u64 numa_set;
if (!vcpu_is_tec(vcpu)) return; @@ -612,7 +637,8 @@ void kvm_destroy_tec(struct kvm_vcpu *vcpu) if (tmi_tec_destroy(tec->tec) != 0) kvm_err("%s vcpu id : %d failed!\n", __func__, vcpu->vcpu_id);
- ret = tmi_mem_free(tec->tec, NO_NUMA, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX); + numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm); + ret = tmi_mem_free(tec->tec, numa_set, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX); if (ret != 0) kvm_err("tmi_mem_free for cvm tec failed\n"); tec->tec = 0; @@ -623,7 +649,7 @@ void kvm_destroy_tec(struct kvm_vcpu *vcpu)
static int tmi_check_version(void) { - uint64_t res; + u64 res; int version_major; int version_minor;
@@ -742,6 +768,7 @@ int kvm_arch_tec_init(struct kvm_vcpu *vcpu) int ret = -ENOMEM; struct cvm_tec *tec; struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm; + u64 numa_set;
if (vcpu->arch.tec) { kvm_err("tec already create.\n"); @@ -756,7 +783,8 @@ int kvm_arch_tec_init(struct kvm_vcpu *vcpu) if (!tec->tec_run) goto tec_free;
- tec->tec = tmi_mem_alloc(cvm->rd, NO_NUMA, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX); + numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm); + tec->tec = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX); if (!tec->tec) { kvm_err("KVM tmi_mem_alloc failed:%d\n", vcpu->vcpu_id); goto tec_free; diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c index 83adfc9f0..1e54daacd 100644 --- a/arch/arm64/kvm/tmi.c +++ b/arch/arm64/kvm/tmi.c @@ -37,11 +37,11 @@ u64 tmi_cvm_activate(u64 rd) return res.a1; }
-u64 tmi_cvm_create(u64 rd, u64 params_ptr) +u64 tmi_cvm_create(u64 rd, u64 params_ptr, u64 numa_set) { struct arm_smccc_res res;
- arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, rd, params_ptr, &res); + arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, rd, params_ptr, numa_set, &res); return res.a1; }
@@ -133,21 +133,21 @@ u64 tmi_features(u64 index) return res.a1; }
-u64 tmi_mem_alloc(u64 rd, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type, +u64 tmi_mem_alloc(u64 rd, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type, enum tmi_tmm_map_size tmm_map_size) { struct arm_smccc_res res;
- arm_smccc_1_1_smc(TMI_TMM_MEM_ALLOC, rd, numa_id, tmm_mem_type, tmm_map_size, &res); + arm_smccc_1_1_smc(TMI_TMM_MEM_ALLOC, rd, numa_set, tmm_mem_type, tmm_map_size, &res); return res.a1; }
-u64 tmi_mem_free(u64 pa, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type, +u64 tmi_mem_free(u64 pa, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type, enum tmi_tmm_map_size tmm_map_size) { struct arm_smccc_res res;
- arm_smccc_1_1_smc(TMI_TMM_MEM_FREE, pa, numa_id, tmm_mem_type, tmm_map_size, &res); + arm_smccc_1_1_smc(TMI_TMM_MEM_FREE, pa, numa_set, tmm_mem_type, tmm_map_size, &res); return res.a1; }
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 476934ec6..2f6f3e893 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1378,12 +1378,13 @@ struct kvm_master_dev_info { #define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */ #define MAX_NUMA_NODE 8 #define MAX_CPU_BIT_MAP 4 +#define MAX_NUMA_BIT_MAP 2
struct kvm_numa_node { __u64 numa_id; __u64 ipa_start; __u64 ipa_size; - __u64 host_numa_node; + __u64 host_numa_nodes[MAX_NUMA_BIT_MAP]; __u64 cpu_id[MAX_CPU_BIT_MAP]; };
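With host_numa_node widened to the host_numa_nodes[] bitmap, userspace describes each guest node roughly as below (a hedged sketch; the field values are made up and the binding policy belongs to the VMM, not to this patch):

	struct kvm_numa_node node = {
		.numa_id   = 0,
		.ipa_start = 0x40000000ULL,
		.ipa_size  = 0x80000000ULL,
	};

	node.host_numa_nodes[0] = 1ULL << 1;	/* back guest node 0 with host node 1 */
	node.cpu_id[0]          = 0xfULL;	/* vCPUs 0-3 belong to this guest node */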
virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
--------------------------------
support observable usage of cvm secure memory
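tmi_mem_info_show() takes a kernel virtual address and converts it to a physical address internally before the SMC, so a consumer only needs an ordinary kernel buffer. A minimal caller sketch, assuming the TMM fills the page with its own result layout (the surrounding consumer is hypothetical and not part of this patch):

	unsigned long buf = __get_free_page(GFP_KERNEL);
	u64 ret;

	if (!buf)
		return -ENOMEM;

	ret = tmi_mem_info_show((u64)buf);	/* __pa() is taken inside */
	if (ret != TMI_SUCCESS)
		pr_err("tmi_mem_info_show failed: %llu\n", ret);

	free_page(buf);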
Signed-off-by: Ju Fu <fuju1@huawei.com>
---
 arch/arm64/include/asm/kvm_tmi.h |  5 ++++-
 arch/arm64/kvm/tmi.c             | 11 +++++++++++
 2 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h index 670955518..232488161 100644 --- a/arch/arm64/include/asm/kvm_tmi.h +++ b/arch/arm64/include/asm/kvm_tmi.h @@ -217,7 +217,8 @@ struct tmi_tec_run { #define TMI_FNUM_VERSION U(0x260) #define TMI_FNUM_MEM_ALLOC U(0x261) #define TMI_FNUM_MEM_FREE U(0x262) -#define TMI_FNUM_DATA_CREATE U(0x263) +#define TMI_FNUM_MEM_INFO_SHOW U(0x263) +#define TMI_FNUM_DATA_CREATE U(0x264) #define TMI_FNUM_DATA_DESTROY U(0x265) #define TMI_FNUM_CVM_ACTIVATE U(0x267) #define TMI_FNUM_CVM_CREATE U(0x268) @@ -256,6 +257,7 @@ struct tmi_tec_run { #define TMI_TMM_FEATURES TMI_FID(SMC_64, TMI_FNUM_FEATURES) #define TMI_TMM_MEM_ALLOC TMI_FID(SMC_64, TMI_FNUM_MEM_ALLOC) #define TMI_TMM_MEM_FREE TMI_FID(SMC_64, TMI_FNUM_MEM_FREE) +#define TMI_TMM_MEM_INFO_SHOW TMI_FID(SMC_64, TMI_FNUM_MEM_INFO_SHOW) #define TMI_TMM_TTT_MAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_RANGE) #define TMI_TMM_TTT_UNMAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_RANGE)
@@ -365,6 +367,7 @@ u64 tmi_mem_alloc(u64 rd, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type, enum tmi_tmm_map_size tmm_map_size); u64 tmi_mem_free(u64 pa, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type, enum tmi_tmm_map_size tmm_map_size); +u64 tmi_mem_info_show(u64 mem_info_addr);
void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu); int kvm_load_user_data(struct kvm *kvm, unsigned long arg); diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c index 1e54daacd..00ea0cef9 100644 --- a/arch/arm64/kvm/tmi.c +++ b/arch/arm64/kvm/tmi.c @@ -4,6 +4,7 @@ */ #include <linux/arm-smccc.h> #include <asm/kvm_tmi.h> +#include <asm/memory.h>
u64 tmi_version(void) { @@ -151,6 +152,16 @@ u64 tmi_mem_free(u64 pa, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type, return res.a1; }
+u64 tmi_mem_info_show(u64 mem_info_addr) +{ + struct arm_smccc_res res; + u64 pa_addr = __pa(mem_info_addr); + + arm_smccc_1_1_smc(TMI_TMM_MEM_INFO_SHOW, pa_addr, &res); + return res.a1; +} +EXPORT_SYMBOL_GPL(tmi_mem_info_show); + u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node) { struct arm_smccc_res res;
virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
--------------------------------
adjusting the interface to adapt to the cca community
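With this change the cVM lifecycle follows the CCA-style flow: the RD is created and the initial image populated through KVM_ENABLE_CAP on the VM fd, and each TEC is created through KVM_ARM_VCPU_FINALIZE with the new KVM_ARM_VCPU_TEC feature, instead of being driven implicitly from the first vcpu run. A rough userspace ordering, assuming the sub-command sits in args[0] and the populate argument pointer in args[1] as the handler reads them (VMM-side sketch only, not part of this patch):

	struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_TMM };
	int feature = KVM_ARM_VCPU_TEC;

	cap.args[0] = KVM_CAP_ARM_TMM_CREATE_RD;
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);		/* allocate RD, create the cVM */

	cap.args[0] = KVM_CAP_ARM_TMM_POPULATE_CVM;
	cap.args[1] = (__u64)(unsigned long)&populate_args;
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);		/* copy the initial image into secure memory */

	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);	/* create the TEC for this vCPU */

	cap.args[0] = KVM_CAP_ARM_TMM_ACTIVATE_CVM;
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);		/* map remaining RAM and activate */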
Signed-off-by: Ju Fu <fuju1@huawei.com>
---
 arch/arm64/include/asm/kvm_pgtable.h        |   3 +
 arch/arm64/include/asm/kvm_tmi.h            |  44 ++-
 arch/arm64/include/asm/kvm_tmm.h            |   5 +-
 arch/arm64/include/asm/sysreg.h             |   3 +
 arch/arm64/include/uapi/asm/kvm.h           |   4 +
 arch/arm64/kvm/arm.c                        |  74 ++--
 arch/arm64/kvm/cvm.c                        | 401 ++++++++++----------
 arch/arm64/kvm/reset.c                      |  12 +-
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h |   3 +
 9 files changed, 300 insertions(+), 249 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index f5dff6d40..2068ec591 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -53,6 +53,9 @@ enum kvm_pgtable_prot { KVM_PGTABLE_PROT_PBHA3 = BIT(62), };
+#define TMI_NO_MEASURE_CONTENT U(0) +#define TMI_MEASURE_CONTENT U(1) + #define PAGE_HYP (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W) #define PAGE_HYP_EXEC (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X) #define PAGE_HYP_RO (KVM_PGTABLE_PROT_R) diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h index 232488161..26db4e8b5 100644 --- a/arch/arm64/include/asm/kvm_tmi.h +++ b/arch/arm64/include/asm/kvm_tmi.h @@ -9,12 +9,14 @@ #include <asm/kvm_asm.h> #include <asm/kvm_pgtable.h> #include <linux/virtio_ring.h> +#include <asm/sysreg.h>
#define GRANULE_SIZE 4096
#define NO_NUMA 0 /* every bit in numa set is 0 */
-#define TMM_TTT_LEVEL_3 3 +#define TMM_TTT_LEVEL_2 2 +#define TMM_TTT_LEVEL_3 3
#ifdef CONFIG_CVM_HOST_FVP_PLAT #define CVM_MEM_BASE ULL(0x8800000000) /* choose FVP platform to run cVM */ @@ -35,17 +37,18 @@ /* TMI error codes. */ #define TMI_SUCCESS 0 #define TMI_ERROR_INPUT 1 -#define TMI_ERROR_MEMORY 2 +#define TMI_ERROR_MEMORY 2 #define TMI_ERROR_ALIAS 3 -#define TMI_ERROR_IN_USE 4 -#define TMI_ERROR_CVM_STATE 5 +#define TMI_ERROR_IN_USE 4 +#define TMI_ERROR_CVM_STATE 5 #define TMI_ERROR_OWNER 6 -#define TMI_ERROR_TEC 7 -#define TMI_ERROR_TTT_WALK 8 -#define TMI_ERROR_TTT_ENTRY 9 -#define TMI_ERROR_NOT_SUPPORTED 10 -#define TMI_ERROR_INTERNAL 11 -#define TMI_ERROR_CVM_POWEROFF 12 +#define TMI_ERROR_TEC 7 +#define TMI_ERROR_TTT_WALK 8 +#define TMI_ERROR_TTT_ENTRY 9 +#define TMI_ERROR_NOT_SUPPORTED 10 +#define TMI_ERROR_INTERNAL 11 +#define TMI_ERROR_CVM_POWEROFF 12 +#define TMI_ERROR_TTT_CREATED 13
#define TMI_RETURN_STATUS(ret) ((ret) & 0xFF) #define TMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF) @@ -268,10 +271,9 @@ struct tmi_tec_run {
/* KVM_CAP_ARM_TMM on VM fd */ #define KVM_CAP_ARM_TMM_CONFIG_CVM_HOST 0 -#define KVM_CAP_ARM_TMM_CREATE_CVM 1 -#define KVM_CAP_ARM_TMM_INIT_IPA_CVM 2 -#define KVM_CAP_ARM_TMM_POPULATE_CVM 3 -#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 4 +#define KVM_CAP_ARM_TMM_CREATE_RD 1 +#define KVM_CAP_ARM_TMM_POPULATE_CVM 2 +#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 3
#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0 #define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1 @@ -321,6 +323,14 @@ struct kvm_cap_arm_tmm_config_item { }; };
+#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0) +struct kvm_cap_arm_tmm_populate_region_args { + __u64 populate_ipa_base; + __u64 populate_ipa_size; + __u32 flags; + __u32 reserved[3]; +}; + enum tmi_tmm_mem_type { TMM_MEM_TYPE_RD, TMM_MEM_TYPE_TEC, @@ -342,6 +352,12 @@ static inline bool tmm_is_addr_ttt_level_aligned(uint64_t addr, int level) return (addr & mask) == 0; }
+static inline bool is_armv8_4_sel2_present(void) +{ + return ((read_sysreg(id_aa64pfr0_el1) >> ID_AA64PFR0_SEL2_SHIFT) & + ID_AA64PFR0_SEL2_MASK) == 1UL; +} + u64 phys_to_cvm_phys(u64 phys);
u64 tmi_version(void); diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h index 3452b4429..b828e74e6 100644 --- a/arch/arm64/include/asm/kvm_tmm.h +++ b/arch/arm64/include/asm/kvm_tmm.h @@ -38,18 +38,15 @@ struct cvm_tec {
int kvm_init_tmm(void); int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); -int kvm_init_cvm_vm(struct kvm *kvm); void kvm_destroy_cvm(struct kvm *kvm); -int kvm_create_tec(struct kvm_vcpu *vcpu); +int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu); void kvm_destroy_tec(struct kvm_vcpu *vcpu); int kvm_tec_enter(struct kvm_vcpu *vcpu); int handle_cvm_exit(struct kvm_vcpu *vcpu, int rec_run_status); int kvm_arm_create_cvm(struct kvm *kvm); void kvm_free_rd(struct kvm *kvm); int cvm_create_rd(struct kvm *kvm); -int kvm_arm_cvm_first_run(struct kvm_vcpu *vcpu); int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target); -int kvm_arch_tec_init(struct kvm_vcpu *vcpu);
void kvm_cvm_unmap_destroy_range(struct kvm *kvm);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 8bcc9ac99..9b118a4b9 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1326,6 +1326,9 @@ #define ID_AA64PFR0_EL2_SHIFT 8 #define ID_AA64PFR0_EL1_SHIFT 4 #define ID_AA64PFR0_EL0_SHIFT 0 +#ifdef CONFIG_CVM_HOST // TODO: needs a unified review +#define ID_AA64PFR0_SEL2_MASK ULL(0xf) +#endif
#define ID_AA64PFR0_MPAM 0x1 #define ID_AA64PFR0_AMU 0x1 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 531ff62e8..a53b16798 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -106,6 +106,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ +#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */
struct kvm_vcpu_init { __u32 target; @@ -354,6 +355,9 @@ struct kvm_vcpu_events { #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 #define KVM_DEV_ARM_ITS_CTRL_RESET 4
+#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256 0 +#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512 1 + /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 #define KVM_ARM_VCPU_PMU_V3_IRQ 0 diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 5372a53a6..dc55b64d1 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -145,6 +145,37 @@ static void set_default_csv2(struct kvm *kvm) kvm->arch.pfr0_csv2 = 1; }
+static int kvm_create_cvm_vm(struct kvm *kvm) +{ + if (!static_key_enabled(&kvm_cvm_is_available)) + return -EFAULT; + + if (kvm->arch.cvm) { + kvm_info("cvm already create.\n"); + return 0; + } + + kvm->arch.cvm = kzalloc(sizeof(struct cvm), GFP_KERNEL_ACCOUNT); + if (!kvm->arch.cvm) + return -ENOMEM; + + return 0; +} + +static int kvm_init_cvm_vm(struct kvm *kvm) +{ + struct tmi_cvm_params *params; + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + + params = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!params) + return -ENOMEM; + + cvm->params = params; + + return 0; +} + /** * kvm_arch_init_vm - initializes a VM data structure * @kvm: pointer to the KVM struct @@ -160,28 +191,25 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) #endif
#ifdef CONFIG_CVM_HOST + ret = kvm_create_cvm_vm(kvm); + if (ret) + return ret; if (kvm_arm_cvm_type(type)) { - ret = cvm_create_rd(kvm); - if (ret) - return ret; + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + + cvm->is_cvm = true; + if (!kvm_is_cvm(kvm)) + return -EINVAL; } #endif
ret = kvm_arm_setup_stage2(kvm, type); if (ret) -#ifdef CONFIG_CVM_HOST - goto out_free_rd; -#else return ret; -#endif
ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu); if (ret) -#ifdef CONFIG_CVM_HOST - goto out_free_rd; -#else return ret; -#endif
ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP); if (ret) @@ -193,6 +221,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
set_default_csv2(kvm); + #ifdef CONFIG_CVM_HOST if (kvm_arm_cvm_type(type)) { ret = kvm_init_cvm_vm(kvm); @@ -204,10 +233,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) return ret; out_free_stage2_pgd: kvm_free_stage2_pgd(&kvm->arch.mmu); -#ifdef CONFIG_CVM_HOST -out_free_rd: - kvm_free_rd(kvm); -#endif return ret; }
@@ -311,6 +336,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; #ifdef CONFIG_CVM_HOST case KVM_CAP_ARM_TMM: + if (!is_armv8_4_sel2_present()) { + r = -ENXIO; + break; + } r = static_key_enabled(&kvm_cvm_is_available); break; #endif @@ -408,14 +437,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (err) return err; #endif - -#ifdef CONFIG_CVM_HOST - if (kvm_is_cvm(vcpu->kvm)) { - err = kvm_arch_tec_init(vcpu); - if (err) - return err; - } -#endif return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP); }
@@ -924,13 +945,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ret = kvm_vcpu_first_run_init(vcpu); if (ret) return ret; -#ifdef CONFIG_CVM_HOST - if (kvm_is_cvm(vcpu->kvm)) { - ret = kvm_arm_cvm_first_run(vcpu); - if (ret) - return ret; - } -#endif if (run->exit_reason == KVM_EXIT_MMIO) { ret = kvm_handle_mmio_return(vcpu); if (ret) diff --git a/arch/arm64/kvm/cvm.c b/arch/arm64/kvm/cvm.c index c3083b659..5c0390f88 100644 --- a/arch/arm64/kvm/cvm.c +++ b/arch/arm64/kvm/cvm.c @@ -162,14 +162,21 @@ int kvm_arm_create_cvm(struct kvm *kvm) struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; unsigned int pgd_sz; struct cvm *cvm = (struct cvm *)kvm->arch.cvm; - u64 numa_set; + /* get affine host numa set by default vcpu 0 */ + u64 numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm);
if (!kvm_is_cvm(kvm) || kvm_cvm_state(kvm) != CVM_STATE_NONE) return 0;
+ cvm->rd = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX); + if (cvm->rd == 0) { + kvm_err("tmi_mem_alloc for cvm rd failed: %d\n", cvm->cvm_vmid); + return -ENOMEM; + } + ret = cvm_vmid_reserve(); if (ret < 0) - return ret; + goto out;
cvm->cvm_vmid = ret;
@@ -186,12 +193,17 @@ int kvm_arm_create_cvm(struct kvm *kvm) numa_set = kvm_get_first_binded_numa_set(kvm); ret = tmi_cvm_create(cvm->rd, __pa(cvm->params), numa_set); if (!ret) - kvm_info("KVM creates cVM: %d\n", cvm->cvm_vmid); + kvm_err("KVM creates cVM: %d\n", cvm->cvm_vmid);
WRITE_ONCE(cvm->state, CVM_STATE_NEW); kfree(cvm->params); cvm->params = NULL; return ret; +out: + kfree(cvm->params); + cvm->params = NULL; + kvm_free_rd(kvm); + return ret; }
int cvm_create_rd(struct kvm *kvm) @@ -199,18 +211,6 @@ int cvm_create_rd(struct kvm *kvm) struct cvm *cvm; u64 numa_set;
- if (!static_key_enabled(&kvm_cvm_is_available)) - return -EFAULT; - - if (kvm->arch.cvm) { - kvm_err("cvm already create.\n"); - return -EFAULT; - } - - kvm->arch.cvm = kzalloc(sizeof(struct cvm), GFP_KERNEL_ACCOUNT); - if (!kvm->arch.cvm) - return -ENOMEM; - /* get affine host numa set by default vcpu 0 */ numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm); cvm = (struct cvm *)kvm->arch.cvm; @@ -284,6 +284,7 @@ int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct cvm *cvm, int max_level, struct kvm_mmu_memory_cache *mc) { + int ret = 0; if (WARN_ON(level == max_level)) return 0;
@@ -296,7 +297,12 @@ int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct cvm *cvm, if (ttt == 0) return -ENOMEM;
- if (kvm_cvm_ttt_create(cvm, ipa, level, ttt)) { + ret = kvm_cvm_ttt_create(cvm, ipa, level, ttt); + if (ret == TMI_ERROR_TTT_CREATED) { + ret = 0; + (void)tmi_mem_free(ttt, numa_set, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX); + continue; + } else if (ret) { (void)tmi_mem_free(ttt, numa_set, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX); return -ENXIO; } @@ -314,6 +320,10 @@ static int kvm_cvm_create_protected_data_page(struct kvm *kvm, struct cvm *cvm,
src_phys = page_to_phys(src_page); ret = tmi_data_create(dst_phys, cvm->rd, ipa, src_phys, level); + if (ret == TMI_ERROR_TTT_CREATED) { + ret = 0; + return ret; + } if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) { /* Create missing RTTs and retry */ int level_fault = TMI_RETURN_INDEX(ret); @@ -412,72 +422,34 @@ int kvm_cvm_populate_par_region(struct kvm *kvm, return ret; }
-static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu) +static int kvm_arch_tec_init(struct kvm_vcpu *vcpu) { - int ret = 0; - gpa_t gpa, gpa_data_end, gpa_end, data_size; - u64 i, map_size, dst_phys, numa_set; - u64 l2_granule = cvm_granule_size(2); /* 2MB */ + int ret = -ENOMEM; + u64 numa_set; + struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec; struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm; - struct kvm_numa_info *numa_info; - - /* 2MB alignment below addresses*/ - gpa = cvm->loader_start; - gpa_end = cvm->loader_start + cvm->ram_size; - data_size = cvm->initrd_start - cvm->loader_start + - cvm->initrd_size; - data_size = round_up(data_size, l2_granule); - gpa_data_end = cvm->loader_start + data_size + l2_granule; - gpa = round_down(gpa, l2_granule); - gpa_end = round_up(gpa_end, l2_granule); - gpa_data_end = round_up(gpa_data_end, l2_granule); - numa_info = &cvm->numa_info; - - numa_set = kvm_get_first_binded_numa_set(vcpu->kvm); - map_size = l2_granule; - do { - dst_phys = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_CVM_PA, map_size); - if (!dst_phys) { - ret = -ENOMEM; - kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__); - goto out; - }
- ret = kvm_cvm_populate_par_region(vcpu->kvm, gpa, gpa + map_size, dst_phys); - if (ret) { - kvm_err("kvm_cvm_populate_par_region fail:%d.\n", ret); - goto out; - } - gpa += map_size; - } while (gpa < gpa_data_end); + tec->tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!tec->tec_run) + goto tec_free;
- if (numa_info->numa_cnt > 0) - gpa_end = numa_info->numa_nodes[0].ipa_start + numa_info->numa_nodes[0].ipa_size; - /* Map gpa range to secure mem without copy data from host. - * The cvm gpa map pages will free by destroy cvm. - */ - ret = tmi_ttt_map_range(cvm->rd, gpa_data_end, - gpa_end - gpa_data_end, numa_set, numa_set); - if (ret) { - kvm_err("tmi_ttt_map_range fail:%d.\n", ret); - goto out; + numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm); + tec->tec = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX); + if (!tec->tec) { + kvm_err("KVM tmi_mem_alloc failed:%d\n", vcpu->vcpu_id); + goto tec_free; } + kvm_info("KVM inits cVM VCPU:%d\n", vcpu->vcpu_id);
- for (i = 1; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) { - struct kvm_numa_node *numa_node = &numa_info->numa_nodes[i]; - - ret = tmi_ttt_map_range(cvm->rd, numa_node->ipa_start, - numa_node->ipa_size, numa_set, numa_node->host_numa_nodes[0]); - if (ret) { - kvm_err("tmi_ttt_map_range fail:%d.\n", ret); - goto out; - } - } -out: + return 0; +tec_free: + kfree(tec->tec_run); + kfree(tec); + vcpu->arch.tec = NULL; return ret; }
-int kvm_create_tec(struct kvm_vcpu *vcpu) +static int kvm_create_tec(struct kvm_vcpu *vcpu) { int ret; int i; @@ -503,38 +475,51 @@ int kvm_create_tec(struct kvm_vcpu *vcpu) params_ptr->ram_size = cvm->ram_size; ret = tmi_tec_create(tec->tec, cvm->rd, mpidr, __pa(params_ptr));
+ tec->tec_created = true; kfree(params_ptr);
return ret; }
-static int kvm_create_all_tecs(struct kvm *kvm) +int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu) { int ret = 0; - struct kvm_vcpu *vcpu; - unsigned long i; - struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
- if (READ_ONCE(cvm->state) == CVM_STATE_ACTIVE) - return -1; + if (!vcpu->arch.tec) { + vcpu->arch.tec = kzalloc(sizeof(struct cvm_tec), GFP_KERNEL_ACCOUNT); + if (!vcpu->arch.tec) + return -ENOMEM; + }
- mutex_lock(&kvm->lock); - kvm_for_each_vcpu(i, vcpu, kvm) { - struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec; + ret = kvm_arch_tec_init(vcpu); + if (ret) + return ret; + ret = kvm_create_tec(vcpu); + if (ret) + return ret;
- if (!tec->tec_created) { - ret = kvm_create_tec(vcpu); - if (ret) { - mutex_unlock(&kvm->lock); - return ret; - } - tec->tec_created = true; - } - } - mutex_unlock(&kvm->lock); return ret; }
+static int config_cvm_hash_algo(struct tmi_cvm_params *params, + struct kvm_cap_arm_tmm_config_item *cfg) +{ + switch (cfg->hash_algo) { + case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256: + if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_256)) + return -EINVAL; + break; + case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512: + if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_512)) + return -EINVAL; + break; + default: + return -EINVAL; + } + params->measurement_algo = cfg->hash_algo; + return 0; +} + static int config_cvm_sve(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg) { struct cvm *cvm = (struct cvm *)kvm->arch.cvm; @@ -581,6 +566,7 @@ static int config_cvm_pmu(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *c
static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) { + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; struct kvm_cap_arm_tmm_config_item cfg; int r = 0;
@@ -597,6 +583,9 @@ static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) case KVM_CAP_ARM_TMM_CFG_PMU: r = config_cvm_pmu(kvm, &cfg); break; + case KVM_CAP_ARM_TMM_CFG_HASH_ALGO: + r = config_cvm_hash_algo(cvm->params, &cfg); + break; default: r = -EINVAL; } @@ -604,6 +593,106 @@ static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) return r; }
+static int kvm_cvm_map_range(struct kvm *kvm) +{ + int ret; + u64 curr_numa_set; + int idx; + u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + struct kvm_numa_info *numa_info = &cvm->numa_info; + gpa_t gpa, gpa_data_end, data_size; + + data_size = cvm->initrd_start - cvm->loader_start + cvm->initrd_size; + data_size = round_up(data_size, l2_granule); + gpa_data_end = cvm->loader_start + data_size + l2_granule; + gpa_data_end = round_up(gpa_data_end, l2_granule); + + curr_numa_set = kvm_get_first_binded_numa_set(kvm); + + gpa = gpa_data_end; + for (idx = 0; idx < numa_info->numa_cnt; idx++) { + struct kvm_numa_node *numa_node = &numa_info->numa_nodes[idx]; + + if (idx) + gpa = numa_node->ipa_start; + if (gpa >= numa_node->ipa_start && + gpa < numa_node->ipa_start + numa_node->ipa_size) { + ret = tmi_ttt_map_range(cvm->rd, gpa, + numa_node->ipa_size - gpa + numa_node->ipa_start, + curr_numa_set, numa_node->host_numa_nodes[0]); + if (ret) { + kvm_err("tmi_ttt_map_range failed: %d.\n", ret); + return ret; + } + } + } + + return ret; +} + +static int kvm_activate_cvm(struct kvm *kvm) +{ + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + + if (kvm_cvm_state(kvm) != CVM_STATE_NEW) + return -EINVAL; + + if (kvm_cvm_map_range(kvm)) + return -EFAULT; + + if (tmi_cvm_activate(cvm->rd)) { + kvm_err("tmi_cvm_activate failed!\n"); + return -ENXIO; + } + + WRITE_ONCE(cvm->state, CVM_STATE_ACTIVE); + kvm_info("cVM%d is activated!\n", cvm->cvm_vmid); + return 0; +} + +static int kvm_populate_ipa_cvm_range(struct kvm *kvm, + struct kvm_cap_arm_tmm_populate_region_args *args) +{ + struct cvm *cvm = (struct cvm *)kvm->arch.cvm; + u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); + phys_addr_t ipa_base, ipa_end, gpa; + u64 map_size, dst_phys; + u64 numa_set; + + if (kvm_cvm_state(kvm) != CVM_STATE_NEW) + return -EINVAL; + if (!IS_ALIGNED(args->populate_ipa_base, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_size, PAGE_SIZE)) + return -EINVAL; + + if (args->flags & ~TMI_MEASURE_CONTENT) + return -EINVAL; + ipa_base = round_down(args->populate_ipa_base, l2_granule); + ipa_end = round_up(args->populate_ipa_base + + args->populate_ipa_size + l2_granule, l2_granule); + + if (ipa_end < ipa_base) + return -EINVAL; + + numa_set = kvm_get_first_binded_numa_set(kvm); + map_size = l2_granule; + for (gpa = ipa_base; gpa < ipa_end; gpa += map_size) { + dst_phys = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_CVM_PA, map_size); + if (!dst_phys) { + kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__); + return -ENOMEM; + } + + if (kvm_cvm_populate_par_region(kvm, gpa, gpa + map_size, dst_phys)) { + kvm_err("kvm_cvm_populate_par_region failed: %d\n", -EFAULT); + return -EFAULT; + } + } + + return 0; +} + int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { int r = 0; @@ -613,9 +702,23 @@ int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) case KVM_CAP_ARM_TMM_CONFIG_CVM_HOST: r = kvm_tmm_config_cvm(kvm, cap); break; - case KVM_CAP_ARM_TMM_CREATE_CVM: + case KVM_CAP_ARM_TMM_CREATE_RD: r = kvm_arm_create_cvm(kvm); break; + case KVM_CAP_ARM_TMM_POPULATE_CVM: { + struct kvm_cap_arm_tmm_populate_region_args args; + void __user *argp = u64_to_user_ptr(cap->args[1]); + + if (copy_from_user(&args, argp, sizeof(args))) { + r = -EFAULT; + break; + } + r = kvm_populate_ipa_cvm_range(kvm, &args); + break; + } + case KVM_CAP_ARM_TMM_ACTIVATE_CVM: + r = kvm_activate_cvm(kvm); + break; default: r = -EINVAL; break; @@ -670,63 +773,6 @@ static int 
tmi_check_version(void) return 0; }
-static int kvm_kick_boot_vcpu(struct kvm *kvm) -{ - struct kvm_vcpu *vcpu; - unsigned long i; - struct cvm *cvm = (struct cvm *)kvm->arch.cvm; - - if (READ_ONCE(cvm->state) == CVM_STATE_ACTIVE) - return 0; - - mutex_lock(&kvm->lock); - kvm_for_each_vcpu(i, vcpu, kvm) { - if (i == 0) - kvm_vcpu_kick(vcpu); - } - mutex_unlock(&kvm->lock); - return 0; -} - -int kvm_arm_cvm_first_run(struct kvm_vcpu *vcpu) -{ - int ret = 0; - struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm; - - if (READ_ONCE(cvm->state) == CVM_STATE_ACTIVE) - return ret; - - if (vcpu->vcpu_id == 0) { - ret = kvm_create_all_tecs(vcpu->kvm); - if (ret != 0) - return ret; - } else { - kvm_kick_boot_vcpu(vcpu->kvm); - } - - mutex_lock(&vcpu->kvm->lock); - - if (vcpu->vcpu_id == 0) { - ret = kvm_sel2_map_protected_ipa(vcpu); - if (ret) { - kvm_err("Map protected ipa failed!\n"); - goto unlock_exit; - } - ret = tmi_cvm_activate(cvm->rd); - if (ret) { - kvm_err("tmi_cvm_activate failed!\n"); - goto unlock_exit; - } - - WRITE_ONCE(cvm->state, CVM_STATE_ACTIVE); - kvm_info("cVM%d is activated!\n", cvm->cvm_vmid); - } -unlock_exit: - mutex_unlock(&vcpu->kvm->lock); - - return ret; -} - int kvm_tec_enter(struct kvm_vcpu *vcpu) { struct tmi_tec_run *run; @@ -763,42 +809,6 @@ int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target) return 0; }
-int kvm_arch_tec_init(struct kvm_vcpu *vcpu) -{ - int ret = -ENOMEM; - struct cvm_tec *tec; - struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm; - u64 numa_set; - - if (vcpu->arch.tec) { - kvm_err("tec already create.\n"); - return -EFAULT; - } - vcpu->arch.tec = kzalloc(sizeof(struct cvm_tec), GFP_KERNEL_ACCOUNT); - if (!vcpu->arch.tec) - return -ENOMEM; - - tec = (struct cvm_tec *)vcpu->arch.tec; - tec->tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - if (!tec->tec_run) - goto tec_free; - - numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm); - tec->tec = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX); - if (!tec->tec) { - kvm_err("KVM tmi_mem_alloc failed:%d\n", vcpu->vcpu_id); - goto tec_free; - } - kvm_info("KVM inits cVM VCPU:%d\n", vcpu->vcpu_id); - - return 0; -tec_free: - kfree(tec->tec_run); - kfree(tec); - vcpu->arch.tec = NULL; - return ret; -} - int kvm_init_tmm(void) { int ret; @@ -821,20 +831,6 @@ int kvm_init_tmm(void) return 0; }
-int kvm_init_cvm_vm(struct kvm *kvm) -{ - struct tmi_cvm_params *params; - struct cvm *cvm = (struct cvm *)kvm->arch.cvm; - - params = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - if (!params) - return -ENOMEM; - - cvm->params = params; - - return 0; -} - int kvm_load_user_data(struct kvm *kvm, unsigned long arg) { struct kvm_user_data user_data; @@ -866,6 +862,11 @@ int kvm_load_user_data(struct kvm *kvm, unsigned long arg) return -EFAULT; }
+ kvm_err("%s:(start: 0x%llx, size: 0x%llx, ram: 0x%llx)\n", __func__, + user_data.loader_start, + user_data.initrd_start - user_data.loader_start + user_data.initrd_size, + user_data.ram_size); + cvm->loader_start = user_data.loader_start; cvm->initrd_start = user_data.initrd_start; cvm->initrd_size = user_data.initrd_size; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index bb177d58c..3d0a3f2cc 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -177,6 +177,8 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) { + int ret = 0; + switch (feature) { case KVM_ARM_VCPU_SVE: if (!vcpu_has_sve(vcpu)) @@ -186,9 +188,17 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) return -EPERM;
return kvm_vcpu_finalize_sve(vcpu); +#ifdef CONFIG_CVM_HOST + case KVM_ARM_VCPU_TEC: + if (!kvm_is_cvm(vcpu->kvm)) + return -EINVAL; + mutex_lock(&vcpu->kvm->lock); + ret = kvm_finalize_vcpu_tec(vcpu); + mutex_unlock(&vcpu->kvm->lock); +#endif }
- return -EINVAL; + return ret; }
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 776d326de..53ed1453e 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -13,6 +13,9 @@ #include <linux/kernel.h> #include <linux/mmzone.h> #include <linux/sizes.h> +#ifdef CONFIG_CVM_HOST +#include <linux/kvm_host.h> +#endif
/* MMIO registers */ #define ARM_SMMU_IDR0 0x0
virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
--------------------------------
fix cvm data_create ram region
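kvm_user_data now carries image_end and dtb_end instead of a single initrd_size, and the populate cap takes two independent IPA windows, so the kernel image window and the initrd/DTB window are populated separately. A hedged example of how the two windows might be filled from that layout ("data" stands for a struct kvm_user_data already passed via KVM_LOAD_USER_DATA; the split is illustrative only):

	struct kvm_cap_arm_tmm_populate_region_args args = {
		.populate_ipa_base1 = data.loader_start,
		.populate_ipa_size1 = data.image_end - data.loader_start,
		.populate_ipa_base2 = data.initrd_start,
		.populate_ipa_size2 = data.dtb_end - data.initrd_start,
		.flags = KVM_ARM_TMM_POPULATE_FLAGS_MEASURE,
	};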
Signed-off-by: Ju Fu <fuju1@huawei.com>
---
 arch/arm64/include/asm/kvm_tmi.h |  6 ++-
 arch/arm64/include/asm/kvm_tmm.h |  3 +-
 arch/arm64/kvm/cvm.c             | 69 ++++++++++++++++++--------------
 include/uapi/linux/kvm.h         |  3 +-
 4 files changed, 48 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h index 26db4e8b5..7deb204bc 100644 --- a/arch/arm64/include/asm/kvm_tmi.h +++ b/arch/arm64/include/asm/kvm_tmi.h @@ -325,8 +325,10 @@ struct kvm_cap_arm_tmm_config_item {
#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0) struct kvm_cap_arm_tmm_populate_region_args { - __u64 populate_ipa_base; - __u64 populate_ipa_size; + __u64 populate_ipa_base1; + __u64 populate_ipa_size1; + __u64 populate_ipa_base2; + __u64 populate_ipa_size2; __u32 flags; __u32 reserved[3]; }; diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h index b828e74e6..1cc2470cc 100644 --- a/arch/arm64/include/asm/kvm_tmm.h +++ b/arch/arm64/include/asm/kvm_tmm.h @@ -19,8 +19,9 @@ struct cvm { u32 cvm_vmid; u64 rd; u64 loader_start; + u64 image_end; u64 initrd_start; - u64 initrd_size; + u64 dtb_end; u64 ram_size; struct kvm_numa_info numa_info; struct tmi_cvm_params *params; diff --git a/arch/arm64/kvm/cvm.c b/arch/arm64/kvm/cvm.c index 5c0390f88..2757813bb 100644 --- a/arch/arm64/kvm/cvm.c +++ b/arch/arm64/kvm/cvm.c @@ -603,7 +603,7 @@ static int kvm_cvm_map_range(struct kvm *kvm) struct kvm_numa_info *numa_info = &cvm->numa_info; gpa_t gpa, gpa_data_end, data_size;
- data_size = cvm->initrd_start - cvm->loader_start + cvm->initrd_size; + data_size = cvm->initrd_start - cvm->loader_start + cvm->dtb_end; data_size = round_up(data_size, l2_granule); gpa_data_end = cvm->loader_start + data_size + l2_granule; gpa_data_end = round_up(gpa_data_end, l2_granule); @@ -651,48 +651,58 @@ static int kvm_activate_cvm(struct kvm *kvm) return 0; }
-static int kvm_populate_ipa_cvm_range(struct kvm *kvm, - struct kvm_cap_arm_tmm_populate_region_args *args) +static int kvm_populate_ram_region(struct kvm *kvm, u64 map_size, + phys_addr_t ipa_base, phys_addr_t ipa_end) { + u64 dst_phys; + phys_addr_t gpa; + u64 numa_set = kvm_get_first_binded_numa_set(kvm); struct cvm *cvm = (struct cvm *)kvm->arch.cvm; - u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); - phys_addr_t ipa_base, ipa_end, gpa; - u64 map_size, dst_phys; - u64 numa_set; - - if (kvm_cvm_state(kvm) != CVM_STATE_NEW) - return -EINVAL; - if (!IS_ALIGNED(args->populate_ipa_base, PAGE_SIZE) || - !IS_ALIGNED(args->populate_ipa_size, PAGE_SIZE)) - return -EINVAL;
- if (args->flags & ~TMI_MEASURE_CONTENT) - return -EINVAL; - ipa_base = round_down(args->populate_ipa_base, l2_granule); - ipa_end = round_up(args->populate_ipa_base + - args->populate_ipa_size + l2_granule, l2_granule); - - if (ipa_end < ipa_base) - return -EINVAL; - - numa_set = kvm_get_first_binded_numa_set(kvm); - map_size = l2_granule; for (gpa = ipa_base; gpa < ipa_end; gpa += map_size) { dst_phys = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_CVM_PA, map_size); if (!dst_phys) { kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__); return -ENOMEM; } - if (kvm_cvm_populate_par_region(kvm, gpa, gpa + map_size, dst_phys)) { kvm_err("kvm_cvm_populate_par_region failed: %d\n", -EFAULT); return -EFAULT; } } - return 0; }
+static int kvm_populate_ipa_cvm_range(struct kvm *kvm, + struct kvm_cap_arm_tmm_populate_region_args *args) +{ + u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); + phys_addr_t ipa_base1, ipa_end1, ipa_base2, ipa_end2; + + if (kvm_cvm_state(kvm) != CVM_STATE_NEW) + return -EINVAL; + if (!IS_ALIGNED(args->populate_ipa_base1, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_size1, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_base2, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_size2, PAGE_SIZE)) + return -EINVAL; + + if (args->flags & ~TMI_MEASURE_CONTENT) + return -EINVAL; + ipa_base1 = round_down(args->populate_ipa_base1, l2_granule); + ipa_end1 = round_up(args->populate_ipa_base1 + + args->populate_ipa_size1 + l2_granule, l2_granule); + ipa_base2 = round_down(args->populate_ipa_base2, l2_granule); + ipa_end2 = round_up(args->populate_ipa_base2 + + args->populate_ipa_size2 + l2_granule, l2_granule); + + if (ipa_end1 < ipa_base1 || ipa_end2 < ipa_base2) + return -EINVAL; + + return kvm_populate_ram_region(kvm, l2_granule, ipa_base1, ipa_end1) || + kvm_populate_ram_region(kvm, l2_granule, ipa_base2, ipa_end2); +} + int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { int r = 0; @@ -854,7 +864,7 @@ int kvm_load_user_data(struct kvm *kvm, unsigned long arg) unsigned long ipa_end = numa_node->ipa_start + numa_node->ipa_size;
if (user_data.loader_start < numa_node->ipa_start || - user_data.initrd_start + user_data.initrd_size > ipa_end) + user_data.dtb_end > ipa_end) return -EFAULT; for (i = 0; i < numa_info->numa_cnt; i++) total_size += numa_info->numa_nodes[i].ipa_size; @@ -864,12 +874,13 @@ int kvm_load_user_data(struct kvm *kvm, unsigned long arg)
kvm_err("%s:(start: 0x%llx, size: 0x%llx, ram: 0x%llx)\n", __func__, user_data.loader_start, - user_data.initrd_start - user_data.loader_start + user_data.initrd_size, + user_data.initrd_start - user_data.loader_start + user_data.dtb_end, user_data.ram_size);
cvm->loader_start = user_data.loader_start; + cvm->image_end = user_data.image_end; cvm->initrd_start = user_data.initrd_start; - cvm->initrd_size = user_data.initrd_size; + cvm->dtb_end = user_data.dtb_end; cvm->ram_size = user_data.ram_size; memcpy(&cvm->numa_info, numa_info, sizeof(struct kvm_numa_info));
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2f6f3e893..c8e7fad3f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1395,8 +1395,9 @@ struct kvm_numa_info {
struct kvm_user_data { __u64 loader_start; + __u64 image_end; __u64 initrd_start; - __u64 initrd_size; + __u64 dtb_end; __u64 ram_size; struct kvm_numa_info numa_info; };
Feedback: The patch(es) you sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/8070
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/7...