From: l30055499 <liupingwei@huawei.com>
Signed-off-by: l30055499 <liupingwei@huawei.com>
---
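This patch adds support for launching an ARM confidential VM (CVM) through the
TMM interface. The virt machine gains a "kvm-type" property ("cvm" sets bit 8
of the KVM machine type), a new "tmm-guest" confidential-guest-support object
carries the measurement algorithm, SVE vector length and PMU counter
configuration, and the boot code hands the kernel/initrd/DTB layout plus the
guest NUMA topology to KVM via the new KVM_LOAD_USER_DATA ioctl. The launch
sequence in target/arm/kvm-tmm.c is: configure the CVM, create its descriptor
(CREATE_RD), populate the measured RAM regions, finalize each vCPU as a TEC,
and activate the CVM when the machine first transitions to running.

A sketch of a possible invocation, assuming the option names introduced here
and the standard confidential-guest-support machine property; the image paths
and object id are illustrative only:

    qemu-system-aarch64 -accel kvm -cpu host -m 2G \
        -object tmm-guest,id=tmm0,measurement-algo=sha256 \
        -machine virt,kvm-type=cvm,confidential-guest-support=tmm0 \
        -kernel Image -initrd rootfs.cpio.gz -append "console=ttyAMA0"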
accel/kvm/kvm-all.c | 34 ++++
hw/arm/boot.c | 49 ++++-
hw/arm/virt.c | 41 ++++-
hw/block/pflash_cfi01.c | 3 -
hw/intc/arm_gicv3_cpuif.c | 2 +-
hw/net/virtio-net.c | 2 +-
include/hw/arm/boot.h | 1 +
include/hw/arm/virt.h | 1 +
include/hw/virtio/virtio.h | 4 +-
include/sysemu/kvm.h | 7 +
linux-headers/asm-arm64/kvm.h | 62 +++++++
linux-headers/linux/kvm.h | 31 +++-
qapi/qom.json | 30 ++-
qemu-options.hx | 4 +-
softmmu/vl.c | 5 +-
target/arm/kvm-tmm.c | 335 ++++++++++++++++++++++++++++++++++
target/arm/kvm.c | 6 +-
target/arm/kvm64.c | 5 +
target/arm/kvm_arm.h | 10 +
target/arm/meson.build | 1 +
20 files changed, 613 insertions(+), 20 deletions(-)
create mode 100644 target/arm/kvm-tmm.c
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index c477f7a63..11f62a597 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -79,6 +79,9 @@ struct KVMParkedVcpu {
};
KVMState *kvm_state;
+
+bool cvm_allowed;
+
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
@@ -2268,6 +2271,11 @@ uint32_t kvm_dirty_ring_size(void)
return kvm_state->kvm_dirty_ring_size;
}
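+
+/* Bit 8 of the KVM machine type, set by the board's kvm_type() hook, marks a confidential VM. */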
+static inline bool kvm_is_cvm_type(int type)
+{
+ return type & (1UL << 8);
+}
+
static int kvm_init(MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
@@ -2352,6 +2360,10 @@ static int kvm_init(MachineState *ms)
type = mc->kvm_type(ms, NULL);
}
+ if (kvm_is_cvm_type(type)) {
+ cvm_allowed = true;
+ }
+
do {
ret = kvm_ioctl(s, KVM_CREATE_VM, type);
} while (ret == -EINTR);
@@ -3451,6 +3463,28 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
return r;
}
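+
+/* Hand the guest image layout and NUMA topology to KVM (KVM_LOAD_USER_DATA). */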
+int kvm_load_user_data(hwaddr loader_start, hwaddr image_end, hwaddr initrd_start, hwaddr dtb_end, hwaddr ram_size,
+ struct kvm_numa_info *numa_info)
+{
+ KVMState *state = kvm_state;
+ struct kvm_user_data data;
+ int ret;
+
+ data.loader_start = loader_start;
+ data.image_end = image_end;
+ data.initrd_start = initrd_start;
+ data.dtb_end = dtb_end;
+ data.ram_size = ram_size;
+ memcpy(&data.numa_info, numa_info, sizeof(struct kvm_numa_info));
+
+ ret = kvm_vm_ioctl(state, KVM_LOAD_USER_DATA, &data);
+ if (ret < 0) {
+        error_report("%s: KVM_LOAD_USER_DATA failed: %s", __func__, strerror(-ret));
+ }
+
+ return ret;
+}
+
static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
hwaddr start_addr, hwaddr size)
{
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 3d45de177..fcbbdd42c 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -27,6 +27,7 @@
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/units.h"
+#include "kvm_arm.h"
/* Kernel boot protocol is specified in the kernel docs
* Documentation/arm/Booting and Documentation/arm64/booting.txt
@@ -629,7 +630,7 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
}
} else {
rc = fdt_add_memory_node(fdt, acells, binfo->loader_start,
- scells, binfo->ram_size, -1);
+ scells, binfo->ram_size, -1);
if (rc < 0) {
fprintf(stderr, "couldn't add /memory@%"PRIx64" node\n",
binfo->loader_start);
@@ -1029,7 +1030,7 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
CPUState *cs;
AddressSpace *as = arm_boot_address_space(cpu, info);
int kernel_size;
- int initrd_size;
+ int initrd_size = 0;
int is_linux = 0;
uint64_t elf_entry;
/* Addresses of first byte used and first byte not used by the image */
@@ -1255,6 +1256,16 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
ARM_CPU(cs)->env.boot_info = info;
}
+
+ if (kvm_enabled() && cvm_enabled()) {
+ if (info->dtb_limit == 0) {
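+            /* No explicit limit was provided: assume the DTB fits in 1 MiB. */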
+ info->dtb_limit = info->dtb_start + 0x100000;
+ }
+ kvm_load_user_data(info->loader_start, image_high_addr, info->initrd_start,
+ info->dtb_limit, info->ram_size, (struct kvm_numa_info *)info->numa_info);
+ tmm_add_ram_region(info->loader_start, image_high_addr - info->loader_start,
+ info->initrd_start, info->dtb_limit - info->initrd_start, true);
+ }
}
static void arm_setup_firmware_boot(ARMCPU *cpu, struct arm_boot_info *info)
@@ -1344,6 +1355,37 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
info->initrd_filename = ms->initrd_filename;
info->dtb_filename = ms->dtb;
info->dtb_limit = 0;
+ info->ram_size = ms->ram_size;
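+    /* Record the guest RAM/NUMA layout; kvm_load_user_data() passes it to KVM for CVMs. */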
+    info->numa_info = g_malloc0(sizeof(struct kvm_numa_info));
+ struct kvm_numa_info *numa_info = (struct kvm_numa_info *) info->numa_info;
+ if (ms->numa_state != NULL && ms->numa_state->num_nodes > 0) {
+ numa_info->numa_cnt = ms->numa_state->num_nodes;
+ __u64 mem_base = info->loader_start;
+ for (int64_t i = 0; i < ms->numa_state->num_nodes && i < MAX_NUMA_NODE; i++) {
+ __u64 mem_len = ms->numa_state->nodes[i].node_mem;
+ numa_info->numa_nodes[i].numa_id = i;
+ numa_info->numa_nodes[i].ipa_start = mem_base;
+ numa_info->numa_nodes[i].ipa_size = mem_len;
+ memcpy(numa_info->numa_nodes[i].host_numa_nodes, ms->numa_state->nodes[i].node_memdev->host_nodes,
+ MAX_NODES / BITS_PER_LONG * sizeof(__u64));
+ mem_base += mem_len;
+ }
+ } else {
+ numa_info->numa_cnt = 1;
+ numa_info->numa_nodes[0].numa_id = 0;
+ numa_info->numa_nodes[0].ipa_start = info->loader_start;
+ numa_info->numa_nodes[0].ipa_size = info->ram_size;
+ memset(numa_info->numa_nodes[0].host_numa_nodes, 0, MAX_NODES / BITS_PER_LONG * sizeof(__u64));
+ }
+
+    for (int i = ms->smp.cpus - 1; i >= 0; i--) {
+        CPUState *vcpu = qemu_get_cpu(i);
+        uint64_t node_id = 0;
+
+        if (ms->possible_cpus->cpus[vcpu->cpu_index].props.has_node_id) {
+            node_id = ms->possible_cpus->cpus[vcpu->cpu_index].props.node_id;
+        }
+        bitmap_set((unsigned long *)numa_info->numa_nodes[node_id].cpu_id, i, 1);
+    }
/* Load the kernel. */
if (!info->kernel_filename || info->firmware_loaded) {
@@ -1352,6 +1394,9 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
arm_setup_direct_kernel_boot(cpu, info);
}
+ g_free(info->numa_info);
+ info->numa_info = NULL;
+
if (!info->skip_dtb_autoload && have_dtb(info)) {
if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) {
exit(1);
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 93554cccf..30842cdce 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -1801,6 +1801,19 @@ static void virt_set_memmap(VirtMachineState *vms)
vms->memmap[i] = base_memmap[i];
}
+    /* Adjust the VIRT_MEM base and size when launching a confidential VM (kvm-type=cvm). */
+ if (object_property_find(OBJECT(current_machine), "kvm-type")) {
+ g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
+ "kvm-type", &error_abort);
+
+ if (!strcmp(kvm_type, "cvm")) {
+ vms->memmap[VIRT_MEM].base = 3 * GiB;
+ vms->memmap[VIRT_MEM].size = ms->ram_size;
+            info_report("CVM: VIRT_MEM range set to 0x%llx - 0x%llx",
+                        (unsigned long long)vms->memmap[VIRT_MEM].base,
+                        (unsigned long long)(vms->memmap[VIRT_MEM].base + ms->ram_size));
+ }
+ }
+
if (ms->ram_slots > ACPI_MAX_RAM_SLOTS) {
error_report("unsupported number of memory slots: %"PRIu64,
ms->ram_slots);
@@ -2072,7 +2085,7 @@ static void machvirt_init(MachineState *machine)
*/
if (vms->secure && firmware_loaded) {
vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
- } else if (vms->virt) {
+ } else if (vms->virt || cvm_enabled()) {
vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC;
} else {
vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC;
@@ -2118,7 +2131,10 @@ static void machvirt_init(MachineState *machine)
exit(1);
}
- create_fdt(vms);
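+    /* The CVM must be configured before the FDT is created and the vCPUs are initialized. */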
+ if (cvm_enabled()) {
+ kvm_arm_tmm_init(machine->cgs, &error_fatal);
+ }
+ create_fdt(vms);
qemu_log("cpu init start\n");
cpu_class = object_class_by_name(ms->cpu_type);
@@ -2991,6 +3007,7 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
static int virt_kvm_type(MachineState *ms, const char *type_str)
{
VirtMachineState *vms = VIRT_MACHINE(ms);
+    int cvm_type = (type_str && strcmp(type_str, "cvm") == 0) ? (1UL << 8) : 0;
int max_vm_pa_size, requested_pa_size;
bool fixed_ipa;
@@ -3020,7 +3037,9 @@ static int virt_kvm_type(MachineState *ms, const char *type_str)
* the implicit legacy 40b IPA setting, in which case the kvm_type
* must be 0.
*/
-    return fixed_ipa ? 0 : requested_pa_size;
+    return (fixed_ipa ? 0 : requested_pa_size) | cvm_type;
}
static void virt_machine_class_init(ObjectClass *oc, void *data)
@@ -3143,6 +3162,19 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
}
+static char *virt_get_kvm_type(Object *obj, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+ return g_strdup(vms->kvm_type);
+}
+
+static void virt_set_kvm_type(Object *obj, const char *value, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+ g_free(vms->kvm_type);
+ vms->kvm_type = g_strdup(value);
+}
+
static void virt_instance_init(Object *obj)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
@@ -3194,6 +3226,9 @@ static void virt_instance_init(Object *obj)
vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
+
+ object_property_add_str(obj, "kvm-type", virt_get_kvm_type, virt_set_kvm_type);
+    object_property_set_description(obj, "kvm-type",
+                                    "Set to 'cvm' to create a confidential VM; omit for a normal VM");
}
static const TypeInfo virt_machine_info = {
diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c
index 81f9f971d..c9d2551da 100644
--- a/hw/block/pflash_cfi01.c
+++ b/hw/block/pflash_cfi01.c
@@ -496,9 +496,6 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset,
case 0xe8: /* Write to buffer */
trace_pflash_write(pfl->name, "write to buffer");
/* FIXME should save @offset, @width for case 1+ */
- qemu_log_mask(LOG_UNIMP,
- "%s: Write to buffer emulation is flawed\n",
- __func__);
pfl->status |= 0x80; /* Ready! */
break;
case 0xf0: /* Probe for AMD flash */
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index eaa1381b3..778fe4c16 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -91,7 +91,7 @@ static bool icv_access(CPUARMState *env, int hcr_flags)
bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);
return flagmatch && arm_current_el(env) == 1
- && !arm_is_secure_below_el3(env);
+ && !arm_is_secure_below_el3(env);
}
static int read_vbpr(GICv3CPUState *cs, int grp)
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index f3fb9393b..eca3be619 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3771,7 +3771,7 @@ static Property virtio_net_properties[] = {
DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
VIRTIO_NET_F_GUEST_UFO, true),
DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
- VIRTIO_NET_F_GUEST_ANNOUNCE, true),
+ VIRTIO_NET_F_GUEST_ANNOUNCE, false),
DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
VIRTIO_NET_F_HOST_TSO4, true),
DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h
index c3c4d3ea7..36aa5dd5c 100644
--- a/include/hw/arm/boot.h
+++ b/include/hw/arm/boot.h
@@ -36,6 +36,7 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size);
/* arm_boot.c */
struct arm_boot_info {
uint64_t ram_size;
+ void *numa_info;
const char *kernel_filename;
const char *kernel_cmdline;
const char *initrd_filename;
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 4ddee19b1..3fc1adcb4 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -176,6 +176,7 @@ struct VirtMachineState {
PCIBus *bus;
char *oem_id;
char *oem_table_id;
+ char *kvm_type;
};
#define VIRT_ECAM_ID(high) (high ? VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM)
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 4cc278f12..a7aea8f91 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -289,9 +289,9 @@ typedef struct VirtIORNGConf VirtIORNGConf;
#define DEFINE_VIRTIO_COMMON_FEATURES(_state, _field) \
DEFINE_PROP_BIT64("indirect_desc", _state, _field, \
- VIRTIO_RING_F_INDIRECT_DESC, true), \
+ VIRTIO_RING_F_INDIRECT_DESC, false), \
DEFINE_PROP_BIT64("event_idx", _state, _field, \
- VIRTIO_RING_F_EVENT_IDX, true), \
+ VIRTIO_RING_F_EVENT_IDX, false), \
DEFINE_PROP_BIT64("notify_on_empty", _state, _field, \
VIRTIO_F_NOTIFY_ON_EMPTY, true), \
DEFINE_PROP_BIT64("any_layout", _state, _field, \
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 9f52d08ce..776c6cd88 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -19,6 +19,7 @@
#include "exec/memattrs.h"
#include "qemu/accel.h"
#include "qom/object.h"
+#include "linux-headers/linux/kvm.h"
#ifdef NEED_CPU_H
# ifdef CONFIG_KVM
@@ -32,6 +33,7 @@
#ifdef CONFIG_KVM_IS_POSSIBLE
extern bool kvm_allowed;
+extern bool cvm_allowed;
extern bool kvm_kernel_irqchip;
extern bool kvm_split_irqchip;
extern bool kvm_async_interrupts_allowed;
@@ -48,6 +50,7 @@ extern bool kvm_ioeventfd_any_length_allowed;
extern bool kvm_msi_use_devid;
#define kvm_enabled() (kvm_allowed)
+#define cvm_enabled() (cvm_allowed)
/**
* kvm_irqchip_in_kernel:
*
@@ -170,6 +173,7 @@ extern bool kvm_msi_use_devid;
#else
#define kvm_enabled() (0)
+#define cvm_enabled() (0)
#define kvm_irqchip_in_kernel() (false)
#define kvm_irqchip_is_split() (false)
#define kvm_async_interrupts_enabled() (false)
@@ -554,6 +558,9 @@ bool kvm_dirty_ring_enabled(void);
uint32_t kvm_dirty_ring_size(void);
+int kvm_load_user_data(hwaddr loader_start, hwaddr image_end, hwaddr initrd_start, hwaddr dtb_end, hwaddr ram_size,
+ struct kvm_numa_info *numa_info);
+
#ifdef __aarch64__
int kvm_create_shadow_device(PCIDevice *dev);
int kvm_delete_shadow_device(PCIDevice *dev);
diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h
index 3d2ce9912..0e75c4eeb 100644
--- a/linux-headers/asm-arm64/kvm.h
+++ b/linux-headers/asm-arm64/kvm.h
@@ -106,6 +106,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */
#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
+#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */
struct kvm_vcpu_init {
__u32 target;
@@ -411,6 +412,67 @@ struct kvm_arm_copy_mte_tags {
#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
+/* KVM_CAP_ARM_TMM on VM fd */
+#define KVM_CAP_ARM_TMM_CONFIG_CVM 0
+#define KVM_CAP_ARM_TMM_CREATE_RD 1
+#define KVM_CAP_ARM_TMM_POPULATE_CVM 2
+#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 3
+
+#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0
+#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1
+
+#define KVM_CAP_ARM_TMM_RPV_SIZE 64
+
+/* List of configuration items accepted for KVM_CAP_ARM_TMM_CONFIG_CVM */
+#define KVM_CAP_ARM_TMM_CFG_RPV 0
+#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1
+#define KVM_CAP_ARM_TMM_CFG_SVE 2
+#define KVM_CAP_ARM_TMM_CFG_DBG 3
+#define KVM_CAP_ARM_TMM_CFG_PMU 4
+
+struct kvm_cap_arm_tmm_config_item {
+ __u32 cfg;
+ union {
+ /* cfg == KVM_CAP_ARM_TMM_CFG_RPV */
+ struct {
+ __u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE];
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */
+ struct {
+ __u32 hash_algo;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_SVE */
+ struct {
+ __u32 sve_vq;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_DBG */
+ struct {
+ __u32 num_brps;
+ __u32 num_wrps;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_PMU */
+ struct {
+ __u32 num_pmu_cntrs;
+ };
+ /* Fix the size of the union */
+ __u8 reserved[256];
+ };
+};
+
+#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0)
+struct kvm_cap_arm_tmm_populate_region_args {
+ __u64 populate_ipa_base1;
+ __u64 populate_ipa_size1;
+ __u64 populate_ipa_base2;
+ __u64 populate_ipa_size2;
+ __u32 flags;
+ __u32 reserved[3];
+};
+
#endif
#endif /* __ARM_KVM_H__ */
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index cd0885f52..c4d42255c 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -14,6 +14,8 @@
#include <linux/ioctl.h>
#include <asm/kvm.h>
+#include "sysemu/numa.h"
+
#define KVM_API_VERSION 12
/* *** Deprecated interfaces *** */
@@ -1126,6 +1128,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_X86_NOTIFY_VMEXIT 219
#define KVM_CAP_ARM_CPU_FEATURE 555
+#define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */
#define KVM_CAP_ARM_VIRT_MSI_BYPASS 799
@@ -1370,6 +1373,32 @@ struct kvm_vfio_spapr_tce {
__s32 tablefd;
};
+#define MAX_NUMA_NODE 8
+#define MAX_CPU_BIT_MAP 4
+#define MAX_NODE_BIT_MAP (MAX_NODES / BITS_PER_LONG)
+
+struct kvm_numa_node {
+ __u64 numa_id;
+ __u64 ipa_start;
+ __u64 ipa_size;
+ __u64 host_numa_nodes[MAX_NODE_BIT_MAP];
+ __u64 cpu_id[MAX_CPU_BIT_MAP];
+};
+
+struct kvm_numa_info {
+ __u64 numa_cnt;
+ struct kvm_numa_node numa_nodes[MAX_NUMA_NODE];
+};
+
+struct kvm_user_data {
+ __u64 loader_start;
+ __u64 image_end;
+ __u64 initrd_start;
+ __u64 dtb_end;
+ __u64 ram_size;
+ struct kvm_numa_info numa_info;
+};
+
/*
* ioctls for VM fds
*/
@@ -1388,7 +1417,7 @@ struct kvm_vfio_spapr_tce {
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
-
+#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data)
/* enable ucontrol for s390 */
struct kvm_s390_ucas_mapping {
__u64 user_addr;
diff --git a/qapi/qom.json b/qapi/qom.json
index eeb5395ff..c9e7de895 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -785,6 +785,30 @@
'reduced-phys-bits': 'uint32',
'*kernel-hashes': 'bool' } }
+##
+# @TmmGuestMeasurementAlgo:
+#
+# Algorithm to use for cvm measurements
+#
+# Since: FIXME
+##
+{ 'enum': 'TmmGuestMeasurementAlgo',
+  'data': [ 'default', 'sha256', 'sha512' ] }
+
+##
+# @TmmGuestProperties:
+#
+# Properties for tmm-guest objects.
+#
+# @sve-vector-length: SVE vector length (default: 0, SVE disabled)
+#
+# @num-pmu-counters: number of PMU counters exposed to the guest
+#                    (default: 0, use the KVM default)
+#
+# @measurement-algo: algorithm used for cvm measurements (default: 'default')
+#
+# Since: FIXME
+##
+{ 'struct': 'TmmGuestProperties',
+ 'data': { '*sve-vector-length': 'uint32',
+ '*num-pmu-counters': 'uint32',
+ '*measurement-algo': 'TmmGuestMeasurementAlgo' } }
+
##
# @ObjectType:
#
@@ -842,7 +866,8 @@
'tls-creds-psk',
'tls-creds-x509',
'tls-cipher-suites',
- { 'name': 'x-remote-object', 'features': [ 'unstable' ] }
+ { 'name': 'x-remote-object', 'features': [ 'unstable' ] },
+ 'tmm-guest'
] }
##
@@ -905,7 +930,8 @@
'tls-creds-psk': 'TlsCredsPskProperties',
'tls-creds-x509': 'TlsCredsX509Properties',
'tls-cipher-suites': 'TlsCredsProperties',
- 'x-remote-object': 'RemoteObjectProperties'
+ 'x-remote-object': 'RemoteObjectProperties',
+ 'tmm-guest': 'TmmGuestProperties'
} }
##
diff --git a/qemu-options.hx b/qemu-options.hx
index d940b4aea..7cb5717e0 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -597,7 +597,7 @@ SRST
ERST
DEF("m", HAS_ARG, QEMU_OPTION_m,
- "-m [size=]megs[,slots=n,maxmem=size]\n"
+ "-m [size=]megs[,slots=n,maxmem=size,cvm_mem=size]\n"
" configure guest RAM\n"
" size: initial amount of guest memory\n"
" slots: number of hotplug slots (default: none)\n"
@@ -605,7 +605,7 @@ DEF("m", HAS_ARG, QEMU_OPTION_m,
"NOTE: Some architectures might enforce a specific granularity\n",
QEMU_ARCH_ALL)
SRST
-``-m [size=]megs[,slots=n,maxmem=size]``
+``-m [size=]megs[,slots=n,maxmem=size,cvm_mem=size]``
Sets guest startup RAM size to megs megabytes. Default is 128 MiB.
Optionally, a suffix of "M" or "G" can be used to signify a value in
megabytes or gigabytes respectively. Optional pair slots, maxmem
diff --git a/softmmu/vl.c b/softmmu/vl.c
index 9dcbc3b26..6c6db0c4f 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -414,6 +414,10 @@ static QemuOptsList qemu_mem_opts = {
.name = "maxmem",
.type = QEMU_OPT_SIZE,
},
+ {
+ .name = "cvm_mem",
+ .type = QEMU_OPT_SIZE,
+ },
{ /* end of list */ }
},
};
@@ -2138,7 +2142,6 @@ static void set_memory_options(MachineClass *mc)
error_report("invalid -m option value: missing 'maxmem' option");
exit(EXIT_FAILURE);
}
-
loc_pop(&loc);
}
diff --git a/target/arm/kvm-tmm.c b/target/arm/kvm-tmm.c
new file mode 100644
index 000000000..94500e885
--- /dev/null
+++ b/target/arm/kvm-tmm.c
@@ -0,0 +1,335 @@
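+/*
+ * QEMU support for ARM confidential VMs (CVM), built on the TMM interface
+ * that KVM exposes through KVM_CAP_ARM_TMM.
+ */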
+
+#include "qemu/osdep.h"
+#include "exec/confidential-guest-support.h"
+#include "hw/boards.h"
+#include "hw/core/cpu.h"
+#include "kvm_arm.h"
+#include "migration/blocker.h"
+#include "qapi/error.h"
+#include "qom/object_interfaces.h"
+#include "sysemu/kvm.h"
+#include "sysemu/runstate.h"
+#include "hw/loader.h"
+
+#define TYPE_TMM_GUEST "tmm-guest"
+OBJECT_DECLARE_SIMPLE_TYPE(TmmGuest, TMM_GUEST)
+
+#define TMM_PAGE_SIZE qemu_real_host_page_size
+#define TMM_MAX_PMU_CTRS 0x20
+#define TMM_MAX_CFG 5
+
+struct TmmGuest {
+ ConfidentialGuestSupport parent_obj;
+ GSList *ram_regions;
+ TmmGuestMeasurementAlgo measurement_algo;
+ uint32_t sve_vl;
+ uint32_t num_pmu_cntrs;
+};
+
+typedef struct {
+ hwaddr base1;
+ hwaddr len1;
+ hwaddr base2;
+ hwaddr len2;
+ bool populate;
+} TmmRamRegion;
+
+static TmmGuest *tmm_guest;
+
+bool kvm_arm_tmm_enabled(void)
+{
+ return !!tmm_guest;
+}
+
+static int tmm_configure_one(TmmGuest *guest, uint32_t cfg, Error **errp)
+{
+ int ret = 1;
+ const char *cfg_str;
+ struct kvm_cap_arm_tmm_config_item args = {
+ .cfg = cfg,
+ };
+
+ switch (cfg) {
+ case KVM_CAP_ARM_TMM_CFG_RPV:
+ return 0;
+ case KVM_CAP_ARM_TMM_CFG_HASH_ALGO:
+ switch (guest->measurement_algo) {
+ case TMM_GUEST_MEASUREMENT_ALGO_DEFAULT:
+ return 0;
+ case TMM_GUEST_MEASUREMENT_ALGO_SHA256:
+ args.hash_algo = KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256;
+ break;
+ case TMM_GUEST_MEASUREMENT_ALGO_SHA512:
+ args.hash_algo = KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ cfg_str = "hash algorithm";
+ break;
+ case KVM_CAP_ARM_TMM_CFG_SVE:
+ if (!guest->sve_vl) {
+ return 0;
+ }
+ args.sve_vq = guest->sve_vl / 128;
+ cfg_str = "SVE";
+ break;
+ case KVM_CAP_ARM_TMM_CFG_DBG:
+ return 0;
+ case KVM_CAP_ARM_TMM_CFG_PMU:
+ if (!guest->num_pmu_cntrs) {
+ return 0;
+ }
+ args.num_pmu_cntrs = guest->num_pmu_cntrs;
+ cfg_str = "PMU";
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0,
+ KVM_CAP_ARM_TMM_CONFIG_CVM, (intptr_t)&args);
+ if (ret) {
+ error_setg_errno(errp, -ret, "TMM: failed to configure %s", cfg_str);
+ }
+
+ return ret;
+}
+
+static gint tmm_compare_ram_regions(gconstpointer a, gconstpointer b)
+{
+ const TmmRamRegion *ra = a;
+ const TmmRamRegion *rb = b;
+
+ g_assert(ra->base1 != rb->base1);
+ return ra->base1 < rb->base1 ? -1 : 1;
+}
+
+void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, hwaddr len2, bool populate)
+{
+ TmmRamRegion *region;
+
+ region = g_new0(TmmRamRegion, 1);
+ region->base1 = QEMU_ALIGN_DOWN(base1, TMM_PAGE_SIZE);
+ region->len1 = QEMU_ALIGN_UP(len1, TMM_PAGE_SIZE);
+ region->base2 = QEMU_ALIGN_DOWN(base2, TMM_PAGE_SIZE);
+ region->len2 = QEMU_ALIGN_UP(len2, TMM_PAGE_SIZE);
+ region->populate = populate;
+
+ tmm_guest->ram_regions = g_slist_insert_sorted(tmm_guest->ram_regions,
+ region, tmm_compare_ram_regions);
+}
+
+static void tmm_populate_region(gpointer data, gpointer unused)
+{
+ int ret;
+ const TmmRamRegion *region = data;
+ struct kvm_cap_arm_tmm_populate_region_args populate_args = {
+ .populate_ipa_base1 = region->base1,
+ .populate_ipa_size1 = region->len1,
+ .populate_ipa_base2 = region->base2,
+ .populate_ipa_size2 = region->len2,
+ .flags = KVM_ARM_TMM_POPULATE_FLAGS_MEASURE,
+ };
+
+ if (!region->populate) {
+ return;
+ }
+
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0,
+ KVM_CAP_ARM_TMM_POPULATE_CVM,
+ (intptr_t)&populate_args);
+ if (ret) {
+ error_report("TMM: failed to populate cvm region (0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx"): %s",
+ region->base1, region->len1, region->base2, region->len2, strerror(-ret));
+ exit(1);
+ }
+}
+
+static int tmm_create_rd(Error **errp)
+{
+ int ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0,
+ KVM_CAP_ARM_TMM_CREATE_RD);
+ if (ret) {
+        error_setg_errno(errp, -ret, "TMM: failed to create the CVM descriptor");
+ }
+ return ret;
+}
+
+static void tmm_vm_state_change(void *opaque, bool running, RunState state)
+{
+    static bool activated;
+    int ret;
+    CPUState *cs;
+
+    if (!running || activated) {
+        return;
+    }
+    activated = true;
+
+ g_slist_foreach(tmm_guest->ram_regions, tmm_populate_region, NULL);
+ g_slist_free_full(g_steal_pointer(&tmm_guest->ram_regions), g_free);
+
+ CPU_FOREACH(cs) {
+ ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_TEC);
+ if (ret) {
+ error_report("TMM: failed to finalize vCPU: %s", strerror(-ret));
+ exit(1);
+ }
+ }
+
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0,
+ KVM_CAP_ARM_TMM_ACTIVATE_CVM);
+ if (ret) {
+ error_report("TMM: failed to activate cvm: %s", strerror(-ret));
+ exit(1);
+ }
+}
+
+int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ int ret;
+ int cfg;
+
+    if (!tmm_guest) {
+        error_setg(errp, "A tmm-guest object is required to start a confidential VM");
+        return -ENODEV;
+    }
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_ARM_TMM)) {
+ error_setg(errp, "KVM does not support TMM");
+ return -ENODEV;
+ }
+
+ for (cfg = 0; cfg < TMM_MAX_CFG; cfg++) {
+        ret = tmm_configure_one(tmm_guest, cfg, errp);
+ if (ret) {
+ return ret;
+ }
+ }
+
+    ret = tmm_create_rd(errp);
+ if (ret) {
+ return ret;
+ }
+
+ qemu_add_vm_change_state_handler(tmm_vm_state_change, NULL);
+ return 0;
+}
+
+static void tmm_get_sve_vl(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ TmmGuest *guest = TMM_GUEST(obj);
+
+ visit_type_uint32(v, name, &guest->sve_vl, errp);
+}
+
+static void tmm_set_sve_vl(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ TmmGuest *guest = TMM_GUEST(obj);
+ uint32_t value;
+
+ if (!visit_type_uint32(v, name, &value, errp)) {
+ return;
+ }
+
+    if (value & 0x7f || value > ARM_MAX_VQ * 128) {
+ error_setg(errp, "invalid SVE vector length");
+ return;
+ }
+
+ guest->sve_vl = value;
+}
+
+static void tmm_get_num_pmu_cntrs(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ TmmGuest *guest = TMM_GUEST(obj);
+
+ visit_type_uint32(v, name, &guest->num_pmu_cntrs, errp);
+}
+
+static void tmm_set_num_pmu_cntrs(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ TmmGuest *guest = TMM_GUEST(obj);
+ uint32_t value;
+
+ if (!visit_type_uint32(v, name, &value, errp)) {
+ return;
+ }
+
+ if (value >= TMM_MAX_PMU_CTRS) {
+ error_setg(errp, "invalid number of PMU counters");
+ return;
+ }
+
+ guest->num_pmu_cntrs = value;
+}
+
+static int tmm_get_measurement_algo(Object *obj, Error **errp)
+{
+ TmmGuest *guest = TMM_GUEST(obj);
+
+ return guest->measurement_algo;
+}
+
+static void tmm_set_measurement_algo(Object *obj, int algo, Error **errp)
+{
+ TmmGuest *guest = TMM_GUEST(obj);
+
+ guest->measurement_algo = algo;
+}
+
+static void tmm_guest_class_init(ObjectClass *oc, void *data)
+{
+ object_class_property_add_enum(oc, "measurement-algo",
+ "TmmGuestMeasurementAlgo",
+ &TmmGuestMeasurementAlgo_lookup,
+ tmm_get_measurement_algo,
+ tmm_set_measurement_algo);
+ object_class_property_set_description(oc, "measurement-algo",
+ "cvm measurement algorithm ('sha256', 'sha512')");
+    /*
+     * This is not ideal. Normally SVE parameters are given to -cpu, but the
+     * cvm parameters are needed much earlier than CPU initialization. We also
+     * have no way to discover what is supported at the moment; the idea is
+     * that the user knows exactly what hardware the VM runs on, because these
+     * parameters are part of the measurement and play a role in attestation.
+     */
+ object_class_property_add(oc, "sve-vector-length", "uint32", tmm_get_sve_vl,
+ tmm_set_sve_vl, NULL, NULL);
+ object_class_property_set_description(oc, "sve-vector-length",
+ "SVE vector length. 0 disables SVE (the default)");
+ object_class_property_add(oc, "num-pmu-counters", "uint32",
+ tmm_get_num_pmu_cntrs, tmm_set_num_pmu_cntrs,
+ NULL, NULL);
+ object_class_property_set_description(oc, "num-pmu-counters",
+ "Number of PMU counters");
+}
+
+static void tmm_guest_instance_init(Object *obj)
+{
+ if (tmm_guest) {
+        error_report("only a single tmm-guest object is supported");
+ exit(1);
+ }
+ tmm_guest = TMM_GUEST(obj);
+}
+
+static const TypeInfo tmm_guest_info = {
+ .parent = TYPE_CONFIDENTIAL_GUEST_SUPPORT,
+ .name = TYPE_TMM_GUEST,
+ .instance_size = sizeof(struct TmmGuest),
+ .instance_init = tmm_guest_instance_init,
+ .class_init = tmm_guest_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_USER_CREATABLE },
+ { }
+ }
+};
+
+static void tmm_register_types(void)
+{
+ type_register_static(&tmm_guest_info);
+}
+type_init(tmm_register_types);
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 38d80adfb..cab9bf0cd 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -605,7 +605,9 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level)
if (kvm_arm_cpreg_level(regidx) > level) {
continue;
}
-
+ if (cvm_enabled() && regidx == KVM_REG_ARM_TIMER_CNT) {
+ continue;
+ }
r.id = regidx;
switch (regidx & KVM_REG_SIZE_MASK) {
case KVM_REG_SIZE_U32:
@@ -1140,7 +1142,7 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)
bool kvm_arch_cpu_check_are_resettable(void)
{
- return true;
+ return !cvm_enabled();
}
void kvm_arch_accel_class_init(ObjectClass *oc)
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 0f67b8ba9..1bc851861 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -818,6 +818,11 @@ static int kvm_arm_sve_set_vls(CPUState *cs)
assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
+ if (cvm_enabled()) {
+ /* Already set through tmm config */
+ return 0;
+ }
+
for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
if (test_bit(vq - 1, cpu->sve_vq_map)) {
i = (vq - 1) / 64;
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index 8b644b392..abcbd5ab8 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -377,6 +377,11 @@ void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa);
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
+void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, hwaddr len2, bool populate);
+
+int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp);
+bool kvm_arm_tmm_enabled(void);
+
int kvm_arm_get_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *target);
int kvm_arm_set_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *source);
@@ -471,6 +476,11 @@ static inline int kvm_arm_set_one_reg(ARMCPU *cpu, uint64_t regidx,
g_assert_not_reached();
}
+static inline int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ g_assert_not_reached();
+}
+
#endif
static inline const char *gic_class_name(void)
diff --git a/target/arm/meson.build b/target/arm/meson.build
index 50f152214..bb950fbff 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -39,6 +39,7 @@ arm_ss.add(files(
arm_ss.add(zlib)
-arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c'))
+arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c', 'kvm-tmm.c'), if_false: files('kvm-stub.c'))
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
'cpu64.c',
--
2.27.0