virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
--------------------------------
Enable cVM secure memory allocation on multiple NUMA nodes
Signed-off-by: Ju Fu <fuju1@huawei.com>
---
 arch/arm64/include/asm/kvm_tmi.h |  8 ++--
 arch/arm64/kvm/cvm.c             | 80 +++++++++++++++++++++-----------
 arch/arm64/kvm/tmi.c             | 12 ++---
 include/uapi/linux/kvm.h         |  3 +-
 4 files changed, 66 insertions(+), 37 deletions(-)
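Note for reviewers: the uapi change below widens host_numa_node into a
host_numa_nodes[] bitmap, so each guest NUMA node can be bound to a set of
host nodes rather than a single id. A minimal userspace sketch of filling
the new layout follows; the helper name fill_numa_node and the single-node
binding are illustrative assumptions, not part of this patch:

    #include <string.h>
    #include <linux/kvm.h>   /* patched uapi with host_numa_nodes[] */

    /* Hypothetical VMM helper: bind one guest NUMA node to one host node.
     * host_numa_nodes[] is treated as a bitmap (bit n == host node n),
     * so an all-zero set is NO_NUMA, i.e. no binding.
     */
    static void fill_numa_node(struct kvm_numa_node *node, __u64 guest_id,
                               __u64 ipa_start, __u64 ipa_size, int host_node)
    {
        memset(node, 0, sizeof(*node));
        node->numa_id = guest_id;
        node->ipa_start = ipa_start;
        node->ipa_size = ipa_size;
        /* MAX_NUMA_BIT_MAP == 2 words cover host nodes 0-127 */
        node->host_numa_nodes[host_node / 64] |= 1ULL << (host_node % 64);
    }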
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h
index 1bba7c7cd..670955518 100644
--- a/arch/arm64/include/asm/kvm_tmi.h
+++ b/arch/arm64/include/asm/kvm_tmi.h
@@ -12,7 +12,7 @@
 #define GRANULE_SIZE 4096
-#define NO_NUMA -1
+#define NO_NUMA 0 /* all bits in the numa set are 0 */
 #define TMM_TTT_LEVEL_3 3
@@ -346,7 +346,7 @@ u64 tmi_version(void);
 u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level);
 u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level);
 u64 tmi_cvm_activate(u64 rd);
-u64 tmi_cvm_create(u64 rd, u64 params_ptr);
+u64 tmi_cvm_create(u64 rd, u64 params_ptr, u64 numa_set);
 u64 tmi_cvm_destroy(u64 rd);
 u64 tmi_tec_create(u64 tec, u64 rd, u64 mpidr, u64 params_ptr);
 u64 tmi_tec_destroy(u64 tec);
@@ -361,9 +361,9 @@ u64 tmi_features(u64 index);
 u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node);
 u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id);
-u64 tmi_mem_alloc(u64 rd, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+u64 tmi_mem_alloc(u64 rd, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type,
 		enum tmi_tmm_map_size tmm_map_size);
-u64 tmi_mem_free(u64 pa, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+u64 tmi_mem_free(u64 pa, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type,
 		enum tmi_tmm_map_size tmm_map_size);
 void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/cvm.c b/arch/arm64/kvm/cvm.c
index 2b58ebf72..c3083b659 100644
--- a/arch/arm64/kvm/cvm.c
+++ b/arch/arm64/kvm/cvm.c
@@ -129,12 +129,40 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
 	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
 }
+/*
+ * The configurable physical NUMA range in QEMU is 0-127,
+ * but in real scenarios 0-63 is sufficient.
+ */
+static u64 kvm_get_host_numa_set_by_vcpu(u64 vcpu, struct kvm *kvm)
+{
+	int64_t i;
+	struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
+	struct kvm_numa_info *numa_info = &cvm->numa_info;
+
+	for (i = 0; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) {
+		if (test_bit(vcpu, (unsigned long *)numa_info->numa_nodes[i].cpu_id))
+			return numa_info->numa_nodes[i].host_numa_nodes[0];
+	}
+	return NO_NUMA;
+}
+
+static u64 kvm_get_first_binded_numa_set(struct kvm *kvm)
+{
+	struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
+	struct kvm_numa_info *numa_info = &cvm->numa_info;
+
+	if (numa_info->numa_cnt > 0)
+		return numa_info->numa_nodes[0].host_numa_nodes[0];
+	return NO_NUMA;
+}
+
 int kvm_arm_create_cvm(struct kvm *kvm)
 {
 	int ret;
 	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
 	unsigned int pgd_sz;
 	struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
+	u64 numa_set;
 	if (!kvm_is_cvm(kvm) || kvm_cvm_state(kvm) != CVM_STATE_NONE)
 		return 0;
@@ -155,7 +183,8 @@ int kvm_arm_create_cvm(struct kvm *kvm)
 	cvm->params->vmid = cvm->cvm_vmid;
 	cvm->params->ns_vtcr = kvm->arch.vtcr;
 	cvm->params->vttbr_el2 = kvm->arch.mmu.pgd_phys;
-	ret = tmi_cvm_create(cvm->rd, __pa(cvm->params));
+	numa_set = kvm_get_first_binded_numa_set(kvm);
+	ret = tmi_cvm_create(cvm->rd, __pa(cvm->params), numa_set);
 	if (!ret)
 		kvm_info("KVM creates cVM: %d\n", cvm->cvm_vmid);
@@ -168,6 +197,7 @@ int kvm_arm_create_cvm(struct kvm *kvm)
 int cvm_create_rd(struct kvm *kvm)
 {
 	struct cvm *cvm;
+	u64 numa_set;
 	if (!static_key_enabled(&kvm_cvm_is_available))
 		return -EFAULT;
@@ -181,8 +211,10 @@ int cvm_create_rd(struct kvm *kvm)
 	if (!kvm->arch.cvm)
 		return -ENOMEM;
+	/* get the affine host numa set of vcpu 0 by default */
+	numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm);
 	cvm = (struct cvm *)kvm->arch.cvm;
-	cvm->rd = tmi_mem_alloc(cvm->rd, NO_NUMA, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX);
+	cvm->rd = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX);
 	if (!cvm->rd) {
 		kfree(kvm->arch.cvm);
 		kvm->arch.cvm = NULL;
@@ -197,11 +229,13 @@ void kvm_free_rd(struct kvm *kvm)
 {
 	int ret;
 	struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
+	u64 numa_set;
 	if (!cvm->rd)
 		return;
-	ret = tmi_mem_free(cvm->rd, NO_NUMA, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX);
+	numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm);
+	ret = tmi_mem_free(cvm->rd, numa_set, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX);
 	if (ret)
 		kvm_err("tmi_mem_free for cvm rd failed: %d\n", cvm->cvm_vmid);
 	else
@@ -255,14 +289,15 @@ int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct cvm *cvm,
 	while (level++ < max_level) {
 		phys_addr_t ttt;
+		u64 numa_set = kvm_get_first_binded_numa_set(kvm);
-		ttt = tmi_mem_alloc(cvm->rd, NO_NUMA,
+		ttt = tmi_mem_alloc(cvm->rd, numa_set,
 				TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX);
 		if (ttt == 0)
 			return -ENOMEM;
 		if (kvm_cvm_ttt_create(cvm, ipa, level, ttt)) {
-			(void)tmi_mem_free(ttt, NO_NUMA, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX);
+			(void)tmi_mem_free(ttt, numa_set, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX);
 			return -ENXIO;
 		}
 	}
@@ -381,10 +416,8 @@ static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu)
 {
 	int ret = 0;
 	gpa_t gpa, gpa_data_end, gpa_end, data_size;
-	u64 i, map_size, dst_phys;
+	u64 i, map_size, dst_phys, numa_set;
 	u64 l2_granule = cvm_granule_size(2); /* 2MB */
-	u64 numa_id = NO_NUMA;
-	int cur_numa_id;
 	struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm;
 	struct kvm_numa_info *numa_info;
@@ -400,12 +433,10 @@ static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu)
 	gpa_data_end = round_up(gpa_data_end, l2_granule);
 	numa_info = &cvm->numa_info;
-	/* get the first binded numa id */
-	if (numa_info->numa_cnt > 0)
-		numa_id = numa_info->numa_nodes[0].host_numa_node;
+	numa_set = kvm_get_first_binded_numa_set(vcpu->kvm);
 	map_size = l2_granule;
 	do {
-		dst_phys = tmi_mem_alloc(cvm->rd, numa_id, TMM_MEM_TYPE_CVM_PA, map_size);
+		dst_phys = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_CVM_PA, map_size);
 		if (!dst_phys) {
 			ret = -ENOMEM;
 			kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__);
@@ -420,30 +451,23 @@ static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu)
 		gpa += map_size;
 	} while (gpa < gpa_data_end);
-	cur_numa_id = numa_node_id();
-	if (cur_numa_id < 0) {
-		ret = -EFAULT;
-		kvm_err("get current numa node fail\n");
-		goto out;
-	}
-
 	if (numa_info->numa_cnt > 0)
 		gpa_end = numa_info->numa_nodes[0].ipa_start + numa_info->numa_nodes[0].ipa_size;
 	/* Map gpa range to secure mem without copy data from host.
 	 * The cvm gpa map pages will free by destroy cvm.
 	 */
 	ret = tmi_ttt_map_range(cvm->rd, gpa_data_end,
-		gpa_end - gpa_data_end, cur_numa_id, numa_id);
+		gpa_end - gpa_data_end, numa_set, numa_set);
 	if (ret) {
 		kvm_err("tmi_ttt_map_range fail:%d.\n", ret);
 		goto out;
 	}
-	for (i = 1; i < numa_info->numa_cnt; i++) {
+	for (i = 1; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) {
 		struct kvm_numa_node *numa_node = &numa_info->numa_nodes[i];
 		ret = tmi_ttt_map_range(cvm->rd, numa_node->ipa_start,
-			numa_node->ipa_size, cur_numa_id, numa_node->host_numa_node);
+			numa_node->ipa_size, numa_set, numa_node->host_numa_nodes[0]);
 		if (ret) {
 			kvm_err("tmi_ttt_map_range fail:%d.\n", ret);
 			goto out;
@@ -459,7 +483,7 @@ int kvm_create_tec(struct kvm_vcpu *vcpu)
 	int i;
 	struct tmi_tec_params *params_ptr;
 	struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu);
-	uint64_t mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+	u64 mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
 	struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm;
 	struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
@@ -605,6 +629,7 @@ void kvm_destroy_tec(struct kvm_vcpu *vcpu)
 {
 	int ret = 0;
 	struct cvm_tec *tec = (struct cvm_tec *)vcpu->arch.tec;
+	u64 numa_set;
 	if (!vcpu_is_tec(vcpu))
 		return;
@@ -612,7 +637,8 @@ void kvm_destroy_tec(struct kvm_vcpu *vcpu)
 	if (tmi_tec_destroy(tec->tec) != 0)
 		kvm_err("%s vcpu id : %d failed!\n", __func__, vcpu->vcpu_id);
-	ret = tmi_mem_free(tec->tec, NO_NUMA, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX);
+	numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm);
+	ret = tmi_mem_free(tec->tec, numa_set, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX);
 	if (ret != 0)
 		kvm_err("tmi_mem_free for cvm tec failed\n");
 	tec->tec = 0;
@@ -623,7 +649,7 @@ void kvm_destroy_tec(struct kvm_vcpu *vcpu)
 static int tmi_check_version(void)
 {
-	uint64_t res;
+	u64 res;
 	int version_major;
 	int version_minor;
@@ -742,6 +768,7 @@ int kvm_arch_tec_init(struct kvm_vcpu *vcpu)
 	int ret = -ENOMEM;
 	struct cvm_tec *tec;
 	struct cvm *cvm = (struct cvm *)vcpu->kvm->arch.cvm;
+	u64 numa_set;
 	if (vcpu->arch.tec) {
 		kvm_err("tec already create.\n");
@@ -756,7 +783,8 @@ int kvm_arch_tec_init(struct kvm_vcpu *vcpu)
 	if (!tec->tec_run)
 		goto tec_free;
-	tec->tec = tmi_mem_alloc(cvm->rd, NO_NUMA, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX);
+	numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm);
+	tec->tec = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX);
 	if (!tec->tec) {
 		kvm_err("KVM tmi_mem_alloc failed:%d\n", vcpu->vcpu_id);
 		goto tec_free;
diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c
index 83adfc9f0..1e54daacd 100644
--- a/arch/arm64/kvm/tmi.c
+++ b/arch/arm64/kvm/tmi.c
@@ -37,11 +37,11 @@ u64 tmi_cvm_activate(u64 rd)
 	return res.a1;
 }
-u64 tmi_cvm_create(u64 rd, u64 params_ptr)
+u64 tmi_cvm_create(u64 rd, u64 params_ptr, u64 numa_set)
 {
 	struct arm_smccc_res res;
-	arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, rd, params_ptr, &res);
+	arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, rd, params_ptr, numa_set, &res);
 	return res.a1;
 }
@@ -133,21 +133,21 @@ u64 tmi_features(u64 index)
 	return res.a1;
 }
-u64 tmi_mem_alloc(u64 rd, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+u64 tmi_mem_alloc(u64 rd, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type,
 	enum tmi_tmm_map_size tmm_map_size)
 {
 	struct arm_smccc_res res;
-	arm_smccc_1_1_smc(TMI_TMM_MEM_ALLOC, rd, numa_id, tmm_mem_type, tmm_map_size, &res);
+	arm_smccc_1_1_smc(TMI_TMM_MEM_ALLOC, rd, numa_set, tmm_mem_type, tmm_map_size, &res);
 	return res.a1;
 }
-u64 tmi_mem_free(u64 pa, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+u64 tmi_mem_free(u64 pa, u64 numa_set, enum tmi_tmm_mem_type tmm_mem_type,
 	enum tmi_tmm_map_size tmm_map_size)
 {
 	struct arm_smccc_res res;
-	arm_smccc_1_1_smc(TMI_TMM_MEM_FREE, pa, numa_id, tmm_mem_type, tmm_map_size, &res);
+	arm_smccc_1_1_smc(TMI_TMM_MEM_FREE, pa, numa_set, tmm_mem_type, tmm_map_size, &res);
 	return res.a1;
 }
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 476934ec6..2f6f3e893 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1378,12 +1378,13 @@ struct kvm_master_dev_info {
 #define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */
 #define MAX_NUMA_NODE 8
 #define MAX_CPU_BIT_MAP 4
+#define MAX_NUMA_BIT_MAP 2
 struct kvm_numa_node {
 	__u64 numa_id;
 	__u64 ipa_start;
 	__u64 ipa_size;
-	__u64 host_numa_node;
+	__u64 host_numa_nodes[MAX_NUMA_BIT_MAP];
 	__u64 cpu_id[MAX_CPU_BIT_MAP];
 };
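Postscript for reviewers: the numa_set argument threaded through
tmi_cvm_create()/tmi_mem_alloc()/tmi_mem_free() above appears to be a bitmap
of host NUMA node ids, matching the new NO_NUMA == 0 (all bits clear)
definition and the host_numa_nodes[] array name. A minimal sketch of that
assumed encoding; numa_set_of() and example() are illustrative, not kernel
code:

    #include <linux/types.h>

    /* Assumed encoding: bit n of a numa_set word stands for host node n. */
    static inline __u64 numa_set_of(int host_node)
    {
        return 1ULL << host_node;       /* single-node set */
    }

    static __u64 example(void)
    {
        /* binding to host nodes 0 and 1 -> 0x3 */
        __u64 set = numa_set_of(0) | numa_set_of(1);

        /* the empty set (NO_NUMA == 0) means no NUMA binding at all */
        return set;
    }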