virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
--------------------------------
Fix the cvm data_create ram region: populate the kernel image and the dtb
as two separate IPA ranges (base1/size1 and base2/size2) instead of a single
contiguous region, and track image_end/dtb_end rather than initrd_size.
Signed-off-by: Ju Fu <fuju1@huawei.com>
---
 arch/arm64/include/asm/kvm_tmi.h |  6 ++-
 arch/arm64/include/asm/kvm_tmm.h |  3 +-
 arch/arm64/kvm/cvm.c             | 69 ++++++++++++++++++--------------
 include/uapi/linux/kvm.h         |  3 +-
 4 files changed, 48 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h index 26db4e8b5..7deb204bc 100644 --- a/arch/arm64/include/asm/kvm_tmi.h +++ b/arch/arm64/include/asm/kvm_tmi.h @@ -325,8 +325,10 @@ struct kvm_cap_arm_tmm_config_item {
#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0) struct kvm_cap_arm_tmm_populate_region_args { - __u64 populate_ipa_base; - __u64 populate_ipa_size; + __u64 populate_ipa_base1; + __u64 populate_ipa_size1; + __u64 populate_ipa_base2; + __u64 populate_ipa_size2; __u32 flags; __u32 reserved[3]; }; diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h index b828e74e6..1cc2470cc 100644 --- a/arch/arm64/include/asm/kvm_tmm.h +++ b/arch/arm64/include/asm/kvm_tmm.h @@ -19,8 +19,9 @@ struct cvm { u32 cvm_vmid; u64 rd; u64 loader_start; + u64 image_end; u64 initrd_start; - u64 initrd_size; + u64 dtb_end; u64 ram_size; struct kvm_numa_info numa_info; struct tmi_cvm_params *params; diff --git a/arch/arm64/kvm/cvm.c b/arch/arm64/kvm/cvm.c index 5c0390f88..2757813bb 100644 --- a/arch/arm64/kvm/cvm.c +++ b/arch/arm64/kvm/cvm.c @@ -603,7 +603,7 @@ static int kvm_cvm_map_range(struct kvm *kvm) struct kvm_numa_info *numa_info = &cvm->numa_info; gpa_t gpa, gpa_data_end, data_size;
- data_size = cvm->initrd_start - cvm->loader_start + cvm->initrd_size; + data_size = cvm->initrd_start - cvm->loader_start + cvm->dtb_end; data_size = round_up(data_size, l2_granule); gpa_data_end = cvm->loader_start + data_size + l2_granule; gpa_data_end = round_up(gpa_data_end, l2_granule); @@ -651,48 +651,58 @@ static int kvm_activate_cvm(struct kvm *kvm) return 0; }
-static int kvm_populate_ipa_cvm_range(struct kvm *kvm, - struct kvm_cap_arm_tmm_populate_region_args *args) +static int kvm_populate_ram_region(struct kvm *kvm, u64 map_size, + phys_addr_t ipa_base, phys_addr_t ipa_end) { + u64 dst_phys; + phys_addr_t gpa; + u64 numa_set = kvm_get_first_binded_numa_set(kvm); struct cvm *cvm = (struct cvm *)kvm->arch.cvm; - u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); - phys_addr_t ipa_base, ipa_end, gpa; - u64 map_size, dst_phys; - u64 numa_set; - - if (kvm_cvm_state(kvm) != CVM_STATE_NEW) - return -EINVAL; - if (!IS_ALIGNED(args->populate_ipa_base, PAGE_SIZE) || - !IS_ALIGNED(args->populate_ipa_size, PAGE_SIZE)) - return -EINVAL;
- if (args->flags & ~TMI_MEASURE_CONTENT) - return -EINVAL; - ipa_base = round_down(args->populate_ipa_base, l2_granule); - ipa_end = round_up(args->populate_ipa_base + - args->populate_ipa_size + l2_granule, l2_granule); - - if (ipa_end < ipa_base) - return -EINVAL; - - numa_set = kvm_get_first_binded_numa_set(kvm); - map_size = l2_granule; for (gpa = ipa_base; gpa < ipa_end; gpa += map_size) { dst_phys = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_CVM_PA, map_size); if (!dst_phys) { kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__); return -ENOMEM; } - if (kvm_cvm_populate_par_region(kvm, gpa, gpa + map_size, dst_phys)) { kvm_err("kvm_cvm_populate_par_region failed: %d\n", -EFAULT); return -EFAULT; } } - return 0; }
+static int kvm_populate_ipa_cvm_range(struct kvm *kvm, + struct kvm_cap_arm_tmm_populate_region_args *args) +{ + u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); + phys_addr_t ipa_base1, ipa_end1, ipa_base2, ipa_end2; + + if (kvm_cvm_state(kvm) != CVM_STATE_NEW) + return -EINVAL; + if (!IS_ALIGNED(args->populate_ipa_base1, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_size1, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_base2, PAGE_SIZE) || + !IS_ALIGNED(args->populate_ipa_size2, PAGE_SIZE)) + return -EINVAL; + + if (args->flags & ~TMI_MEASURE_CONTENT) + return -EINVAL; + ipa_base1 = round_down(args->populate_ipa_base1, l2_granule); + ipa_end1 = round_up(args->populate_ipa_base1 + + args->populate_ipa_size1 + l2_granule, l2_granule); + ipa_base2 = round_down(args->populate_ipa_base2, l2_granule); + ipa_end2 = round_up(args->populate_ipa_base2 + + args->populate_ipa_size2 + l2_granule, l2_granule); + + if (ipa_end1 < ipa_base1 || ipa_end2 < ipa_base2) + return -EINVAL; + + return kvm_populate_ram_region(kvm, l2_granule, ipa_base1, ipa_end1) || + kvm_populate_ram_region(kvm, l2_granule, ipa_base2, ipa_end2); +} + int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { int r = 0; @@ -854,7 +864,7 @@ int kvm_load_user_data(struct kvm *kvm, unsigned long arg) unsigned long ipa_end = numa_node->ipa_start + numa_node->ipa_size;
if (user_data.loader_start < numa_node->ipa_start || - user_data.initrd_start + user_data.initrd_size > ipa_end) + user_data.dtb_end > ipa_end) return -EFAULT; for (i = 0; i < numa_info->numa_cnt; i++) total_size += numa_info->numa_nodes[i].ipa_size; @@ -864,12 +874,13 @@ int kvm_load_user_data(struct kvm *kvm, unsigned long arg)
kvm_err("%s:(start: 0x%llx, size: 0x%llx, ram: 0x%llx)\n", __func__, user_data.loader_start, - user_data.initrd_start - user_data.loader_start + user_data.initrd_size, + user_data.initrd_start - user_data.loader_start + user_data.dtb_end, user_data.ram_size);
cvm->loader_start = user_data.loader_start; + cvm->image_end = user_data.image_end; cvm->initrd_start = user_data.initrd_start; - cvm->initrd_size = user_data.initrd_size; + cvm->dtb_end = user_data.dtb_end; cvm->ram_size = user_data.ram_size; memcpy(&cvm->numa_info, numa_info, sizeof(struct kvm_numa_info));
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2f6f3e893..c8e7fad3f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1395,8 +1395,9 @@ struct kvm_numa_info {
struct kvm_user_data { __u64 loader_start; + __u64 image_end; __u64 initrd_start; - __u64 initrd_size; + __u64 dtb_end; __u64 ram_size; struct kvm_numa_info numa_info; };