virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
--------------------------------
fix cvm data_create ram region

Handle the cvm data_create RAM as two regions, the kernel image and the
initrd + dtb, instead of a single region: the init-IPA and populate uAPI
arguments now carry two base/size pairs, and struct kvm_user_data passes
image_end and dtb_end in place of initrd_size.
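For reference, the sketch below shows how a VMM might fill the reworked
parameters. This is illustrative only and not part of the patch: it assumes
uAPI headers from a kernel carrying this change, mirrors the populate-args
struct locally because kvm_tmi.h is not a uAPI header, and deliberately omits
the ioctl / KVM_ENABLE_CAP plumbing that consumes these structures.

/*
 * Illustrative sketch only: map a guest load layout onto the reworked
 * kvm_user_data and populate-region arguments introduced by this patch.
 */
#include <stdint.h>
#include <linux/kvm.h>			/* struct kvm_user_data with image_end/dtb_end */

/* Local mirror of kvm_cap_arm_tmm_populate_region_args after this patch. */
struct tmm_populate_region_args {
	uint64_t populate_ipa_base1;	/* region 1: kernel image */
	uint64_t populate_ipa_size1;
	uint64_t populate_ipa_base2;	/* region 2: initrd + dtb */
	uint64_t populate_ipa_size2;
	uint32_t flags;			/* bit 0: measure content */
	uint32_t reserved[3];
};

static void cvm_fill_load_params(struct kvm_user_data *data,
				 struct tmm_populate_region_args *populate,
				 uint64_t image_base, uint64_t image_size,
				 uint64_t initrd_base, uint64_t initrd_dtb_size,
				 uint64_t ram_size)
{
	/* kvm_user_data: absolute IPA bounds instead of the old initrd_size. */
	data->loader_start = image_base;
	data->image_end    = image_base + image_size;
	data->initrd_start = initrd_base;
	data->dtb_end      = initrd_base + initrd_dtb_size;
	data->ram_size     = ram_size;
	/* data->numa_info is filled from the VMM's NUMA layout (omitted). */

	/* Describe both regions in one populate request instead of one. */
	populate->populate_ipa_base1 = image_base;
	populate->populate_ipa_size1 = image_size;
	populate->populate_ipa_base2 = initrd_base;
	populate->populate_ipa_size2 = initrd_dtb_size;
	populate->flags = 1U << 0;	/* KVM_ARM_TMM_POPULATE_FLAGS_MEASURE */
}

On the kernel side both regions are rounded to the level-2 granule, TTT
levels are created for each range, and kvm_cvm_populate_ram_region() is
called once per region, as the diff below shows.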
Signed-off-by: Ju Fu <fuju1@huawei.com>
---
 arch/arm64/include/asm/kvm_tmi.h  |  12 ++--
 arch/arm64/include/asm/kvm_tmm.h  |   3 +-
 arch/arm64/include/uapi/asm/kvm.h |   2 +-
 arch/arm64/kvm/arm.c              |   2 +-
 arch/arm64/kvm/cvm.c              | 105 ++++++++++++++++++------------
 include/uapi/linux/kvm.h          |   3 +-
 6 files changed, 79 insertions(+), 48 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h
index e58d266d6..8b90cb81e 100644
--- a/arch/arm64/include/asm/kvm_tmi.h
+++ b/arch/arm64/include/asm/kvm_tmi.h
@@ -326,15 +326,19 @@ struct kvm_cap_arm_tmm_config_item {
 #define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0)
 
 struct kvm_cap_arm_tmm_populate_region_args {
-	__u64 populate_ipa_base;
-	__u64 populate_ipa_size;
+	__u64 populate_ipa_base1;
+	__u64 populate_ipa_size1;
+	__u64 populate_ipa_base2;
+	__u64 populate_ipa_size2;
 	__u32 flags;
 	__u32 reserved[3];
 };
 
 struct kvm_cap_arm_tmm_init_ipa_args {
-	__u64 init_ipa_base;
-	__u64 init_ipa_size;
+	__u64 init_ipa_base1;
+	__u64 init_ipa_size1;
+	__u64 init_ipa_base2;
+	__u64 init_ipa_size2;
 	__u32 reserved[4];
 };
 
diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h
index 290c61ffa..94e73c0dd 100644
--- a/arch/arm64/include/asm/kvm_tmm.h
+++ b/arch/arm64/include/asm/kvm_tmm.h
@@ -19,8 +19,9 @@ struct cvm {
 	u32 cvm_vmid;
 	u64 rd;
 	u64 loader_start;
+	u64 image_end;
 	u64 initrd_start;
-	u64 initrd_size;
+	u64 dtb_end;
 	u64 ram_size;
 	struct kvm_numa_info numa_info;
 	struct tmi_cvm_params *params;
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index f44c65ea0..a53b16798 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -106,7 +106,7 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_SVE		4 /* enable SVE for this CPU */
 #define KVM_ARM_VCPU_PTRAUTH_ADDRESS	5 /* VCPU uses address authentication */
 #define KVM_ARM_VCPU_PTRAUTH_GENERIC	6 /* VCPU uses generic authentication */
-#define KVM_ARM_VCPU_TEC	8 /* VCPU TEC state as part of cvm */
+#define KVM_ARM_VCPU_TEC		8 /* VCPU TEC state as part of cvm */
 
 struct kvm_vcpu_init {
 	__u32 target;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 0bb6792b7..8dad1a310 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -145,7 +145,7 @@ static void set_default_csv2(struct kvm *kvm)
 		kvm->arch.pfr0_csv2 = 1;
 }
 
-/*
+/**
  * kvm_arch_init_vm - initializes a VM data structure
  * @kvm: pointer to the KVM struct
  */
diff --git a/arch/arm64/kvm/cvm.c b/arch/arm64/kvm/cvm.c
index eb988f10a..3f0370615 100644
--- a/arch/arm64/kvm/cvm.c
+++ b/arch/arm64/kvm/cvm.c
@@ -564,7 +564,7 @@ static int kvm_cvm_map_range(struct kvm *kvm)
 	struct kvm_numa_info *numa_info = &cvm->numa_info;
 	gpa_t gpa, gpa_data_end, data_size;
 
-	data_size = cvm->initrd_start - cvm->loader_start + cvm->initrd_size;
+	data_size = cvm->initrd_start - cvm->loader_start + cvm->dtb_end;
 	data_size = round_up(data_size, l2_granule);
 	gpa_data_end = cvm->loader_start + data_size + l2_granule;
 	gpa_data_end = round_up(gpa_data_end, l2_granule);
@@ -612,62 +612,57 @@ static int kvm_activate_cvm(struct kvm *kvm)
 	return 0;
 }
 
-static int kvm_init_ipa_cvm_range(struct kvm *kvm,
-			struct kvm_cap_arm_tmm_init_ipa_args *args)
+static int kvm_cvm_create_ttt(struct kvm *kvm, u64 granule_size,
+			gpa_t addr, gpa_t end)
 {
 	int ret = 0;
-	u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2);
-	gpa_t addr, end;
 	gpa_t ipa;
 	struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
 
-	addr = round_down(args->init_ipa_base, l2_granule);
-	end = round_up(args->init_ipa_base + args->init_ipa_size, l2_granule);
-
-	if (end < addr)
-		return -EINVAL;
-
-	if (kvm_cvm_state(kvm) != CVM_STATE_NEW)
-		return -EINVAL;
-
-	for (ipa = addr; ipa < end; ipa += l2_granule) {
-		ret = kvm_cvm_create_ttt_levels(kvm, cvm, ipa,
-					kvm->arch.mmu.pgt->start_level,
-					TMM_TTT_LEVEL_2, NULL);
+	for (ipa = addr; ipa < end; ipa += granule_size) {
+		ret = kvm_cvm_create_ttt_levels(kvm, cvm,
+					ipa, kvm->arch.mmu.pgt->start_level,
+					TMM_TTT_LEVEL_2, NULL);
 		WARN_ON(ret);
 		if (ret)
 			return ret;
 	}
-
 	return ret;
 }
 
-static int kvm_populate_ipa_cvm_range(struct kvm *kvm,
-			struct kvm_cap_arm_tmm_populate_region_args *args)
+static int kvm_init_ipa_cvm_range(struct kvm *kvm,
+			struct kvm_cap_arm_tmm_init_ipa_args *args)
 {
-	struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
+	int ret = 0;
+	gpa_t addr1, end1, addr2, end2;
 	u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2);
-	phys_addr_t ipa_base, ipa_end, gpa;
-	u64 map_size, dst_phys;
-	u64 numa_set;
 
-	if (kvm_cvm_state(kvm) != CVM_STATE_NEW)
-		return -EINVAL;
-	if (!IS_ALIGNED(args->populate_ipa_base, PAGE_SIZE) ||
-		!IS_ALIGNED(args->populate_ipa_size, PAGE_SIZE))
-		return -EINVAL;
+	addr1 = round_down(args->init_ipa_base1, l2_granule);
+	end1 = round_up(args->init_ipa_base1 + args->init_ipa_size1, l2_granule);
+	addr2 = round_down(args->init_ipa_base2, l2_granule);
+	end2 = round_up(args->init_ipa_base2 + args->init_ipa_size2, l2_granule);
 
-	if (args->flags & ~TMI_MEASURE_CONTENT)
+	if (end1 < addr1 || end2 < addr2)
 		return -EINVAL;
-	ipa_base = round_down(args->populate_ipa_base, l2_granule);
-	ipa_end = round_up(args->populate_ipa_base +
-			args->populate_ipa_size + l2_granule, l2_granule);
 
-	if (ipa_end < ipa_base)
+	if (kvm_cvm_state(kvm) != CVM_STATE_NEW)
 		return -EINVAL;
 
-	numa_set = kvm_get_first_binded_numa_set(kvm);
-	map_size = l2_granule;
+	ret = kvm_cvm_create_ttt(kvm, l2_granule, addr1, end1);
+	if (ret)
+		return ret;
+	ret = kvm_cvm_create_ttt(kvm, l2_granule, addr2, end2);
+	return ret;
+}
+
+static int kvm_cvm_populate_ram_region(struct kvm *kvm, u64 map_size,
+			phys_addr_t ipa_base, phys_addr_t ipa_end)
+{
+	u64 dst_phys;
+	phys_addr_t gpa;
+	u64 numa_set = kvm_get_first_binded_numa_set(kvm);
+	struct cvm *cvm = (struct cvm *)kvm->arch.cvm;
+
 	for (gpa = ipa_base; gpa < ipa_end; gpa += map_size) {
 		dst_phys = tmi_mem_alloc(cvm->rd, numa_set, TMM_MEM_TYPE_CVM_PA, map_size);
 		if (!dst_phys) {
@@ -680,10 +675,39 @@ static int kvm_populate_ipa_cvm_range(struct kvm *kvm,
 			return -EFAULT;
 		}
 	}
-
 	return 0;
 }
 
+static int kvm_populate_ipa_cvm_range(struct kvm *kvm,
+			struct kvm_cap_arm_tmm_populate_region_args *args)
+{
+	u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2);
+	phys_addr_t ipa_base1, ipa_end1, ipa_base2, ipa_end2;
+
+	if (kvm_cvm_state(kvm) != CVM_STATE_NEW)
+		return -EINVAL;
+	if (!IS_ALIGNED(args->populate_ipa_base1, PAGE_SIZE) ||
+		!IS_ALIGNED(args->populate_ipa_size1, PAGE_SIZE) ||
+		!IS_ALIGNED(args->populate_ipa_base2, PAGE_SIZE) ||
+		!IS_ALIGNED(args->populate_ipa_size2, PAGE_SIZE))
+		return -EINVAL;
+
+	if (args->flags & ~TMI_MEASURE_CONTENT)
+		return -EINVAL;
+	ipa_base1 = round_down(args->populate_ipa_base1, l2_granule);
+	ipa_end1 = round_up(args->populate_ipa_base1 +
+			args->populate_ipa_size1 + l2_granule, l2_granule);
+	ipa_base2 = round_down(args->populate_ipa_base2, l2_granule);
+	ipa_end2 = round_up(args->populate_ipa_base2 +
+			args->populate_ipa_size2 + l2_granule, l2_granule);
+
+	if (ipa_end1 < ipa_base1 || ipa_end2 < ipa_base2)
+		return -EINVAL;
+
+	return kvm_cvm_populate_ram_region(kvm, l2_granule, ipa_base1, ipa_end1) ||
+		kvm_cvm_populate_ram_region(kvm, l2_granule, ipa_base2, ipa_end2);
+}
+
 int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 {
 	int r = 0;
@@ -906,7 +930,7 @@ int kvm_load_user_data(struct kvm *kvm, unsigned long arg)
 		unsigned long ipa_end = numa_node->ipa_start + numa_node->ipa_size;
 
 		if (user_data.loader_start < numa_node->ipa_start ||
-			user_data.initrd_start + user_data.initrd_size > ipa_end)
+			user_data.dtb_end > ipa_end)
 			return -EFAULT;
 		for (i = 0; i < numa_info->numa_cnt; i++)
 			total_size += numa_info->numa_nodes[i].ipa_size;
@@ -915,8 +939,9 @@ int kvm_load_user_data(struct kvm *kvm, unsigned long arg)
 	}
 
 	cvm->loader_start = user_data.loader_start;
+	cvm->image_end = user_data.image_end;
 	cvm->initrd_start = user_data.initrd_start;
-	cvm->initrd_size = user_data.initrd_size;
+	cvm->dtb_end = user_data.dtb_end;
 	cvm->ram_size = user_data.ram_size;
 	memcpy(&cvm->numa_info, numa_info, sizeof(struct kvm_numa_info));
 
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 2f6f3e893..c8e7fad3f 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1395,8 +1395,9 @@ struct kvm_numa_info {
 
 struct kvm_user_data {
 	__u64 loader_start;
+	__u64 image_end;
 	__u64 initrd_start;
-	__u64 initrd_size;
+	__u64 dtb_end;
 	__u64 ram_size;
 	struct kvm_numa_info numa_info;
 };