From: chenjiajun <chenjiajun8@huawei.com>
virt inclusion
category: feature
bugzilla: 46853
CVE: NA
This patch exports the remaining aarch64 exit items to vcpu_stat via debugfs. The items include: fp_asimd_exit_stat, irq_exit_stat, sys64_exit_stat, mabt_exit_stat, fail_entry_exit_stat, internal_error_exit_stat, unknown_ec_exit_stat, cp15_32_exit_stat, cp15_64_exit_stat, cp14_mr_exit_stat, cp14_ls_exit_stat, cp14_64_exit_stat, smc_exit_stat, sve_exit_stat and debug_exit_stat.
Signed-off-by: Biaoxiang Ye <yebiaoxiang@huawei.com>
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Signed-off-by: chenjiajun <chenjiajun8@huawei.com>
---
 arch/arm64/include/asm/kvm_host.h       | 15 +++++++++++++++
 arch/arm64/kvm/guest.c                  | 15 +++++++++++++++
 arch/arm64/kvm/handle_exit.c            |  8 ++++++++
 arch/arm64/kvm/hyp/include/hyp/switch.h |  1 +
 arch/arm64/kvm/mmu.c                    |  1 +
 arch/arm64/kvm/sys_regs.c               | 11 +++++++++++
 6 files changed, 51 insertions(+)
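Note (below the fold, not part of the commit message): the DFX_STAT()/dfx_debugfs_entries plumbing used in the guest.c hunk is introduced by an earlier patch in this series and is not shown here. As a rough sketch only, assuming the usual name-plus-offset layout for such debugfs stat tables, the macro and a per-VM read helper might look like the following; the field names and the dfx_read_stat() helper are illustrative assumptions, not the series' actual definitions:

/*
 * Illustrative only: the field layout and the helper below are assumptions,
 * not the definitions from the earlier patch in this series.
 */
struct dfx_kvm_stats_debugfs_item {
	const char *name;
	size_t offset;			/* offset into struct kvm_vcpu_stat */
};

#define DFX_STAT(n, x)							\
	{ .name = n, .offset = offsetof(struct kvm_vcpu_stat, x) }

/* Sum one exported counter over every vCPU of a VM. */
static u64 dfx_read_stat(struct kvm *kvm,
			 const struct dfx_kvm_stats_debugfs_item *item)
{
	struct kvm_vcpu *vcpu;
	u64 sum = 0;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		sum += *(u64 *)((char *)&vcpu->stat + item->offset);

	return sum;
}

With entries built this way, a debugfs show routine only has to walk dfx_debugfs_entries and print one summed value per name.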
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 1ba35b90bdb4..db5992ecb207 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -467,6 +467,21 @@ struct kvm_vcpu_stat {
 	u64 mmio_exit_user;
 	u64 mmio_exit_kernel;
 	u64 exits;
+	u64 fp_asimd_exit_stat;
+	u64 irq_exit_stat;
+	u64 sys64_exit_stat;
+	u64 mabt_exit_stat;
+	u64 fail_entry_exit_stat;
+	u64 internal_error_exit_stat;
+	u64 unknown_ec_exit_stat;
+	u64 cp15_32_exit_stat;
+	u64 cp15_64_exit_stat;
+	u64 cp14_mr_exit_stat;
+	u64 cp14_ls_exit_stat;
+	u64 cp14_64_exit_stat;
+	u64 smc_exit_stat;
+	u64 sve_exit_stat;
+	u64 debug_exit_stat;
 };
 
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index c584b0fb7692..0a9096ba41a4 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -55,6 +55,21 @@ struct dfx_kvm_stats_debugfs_item dfx_debugfs_entries[] = {
 	DFX_STAT("mmio_exit_user", mmio_exit_user),
 	DFX_STAT("mmio_exit_kernel", mmio_exit_kernel),
 	DFX_STAT("exits", exits),
+	DFX_STAT("fp_asimd_exit_stat", fp_asimd_exit_stat),
+	DFX_STAT("irq_exit_stat", irq_exit_stat),
+	DFX_STAT("sys64_exit_stat", sys64_exit_stat),
+	DFX_STAT("mabt_exit_stat", mabt_exit_stat),
+	DFX_STAT("fail_entry_exit_stat", fail_entry_exit_stat),
+	DFX_STAT("internal_error_exit_stat", internal_error_exit_stat),
+	DFX_STAT("unknown_ec_exit_stat", unknown_ec_exit_stat),
+	DFX_STAT("cp15_32_exit_stat", cp15_32_exit_stat),
+	DFX_STAT("cp15_64_exit_stat", cp15_64_exit_stat),
+	DFX_STAT("cp14_mr_exit_stat", cp14_mr_exit_stat),
+	DFX_STAT("cp14_ls_exit_stat", cp14_ls_exit_stat),
+	DFX_STAT("cp14_64_exit_stat", cp14_64_exit_stat),
+	DFX_STAT("smc_exit_stat", smc_exit_stat),
+	DFX_STAT("sve_exit_stat", sve_exit_stat),
+	DFX_STAT("debug_exit_stat", debug_exit_stat),
 	{ NULL }
 };
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 5d690d60ccad..7199dd851454 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -62,6 +62,7 @@ static int handle_smc(struct kvm_vcpu *vcpu)
 	 */
 	vcpu_set_reg(vcpu, 0, ~0UL);
 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	vcpu->stat.smc_exit_stat++;
 	return 1;
 }
 
@@ -124,6 +125,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
 
 	run->exit_reason = KVM_EXIT_DEBUG;
 	run->debug.arch.hsr = esr;
+	vcpu->stat.debug_exit_stat++;
 
 	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_WATCHPT_LOW:
@@ -152,6 +154,7 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
 		      esr, esr_get_class_string(esr));
 
 	kvm_inject_undefined(vcpu);
+	vcpu->stat.unknown_ec_exit_stat++;
 	return 1;
 }
 
@@ -159,6 +162,7 @@ static int handle_sve(struct kvm_vcpu *vcpu)
 {
 	/* Until SVE is supported for guests: */
 	kvm_inject_undefined(vcpu);
+	vcpu->stat.sve_exit_stat++;
 	return 1;
 }
 
@@ -262,6 +266,7 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
 
 	switch (exception_index) {
 	case ARM_EXCEPTION_IRQ:
+		vcpu->stat.irq_exit_stat++;
 		return 1;
 	case ARM_EXCEPTION_EL1_SERROR:
 		return 1;
@@ -273,6 +278,7 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
 		 * is pre-empted by kvm_reboot()'s shutdown call.
 		 */
 		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		vcpu->stat.fail_entry_exit_stat++;
 		return 0;
 	case ARM_EXCEPTION_IL:
 		/*
@@ -280,11 +286,13 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
 		 * have been corrupted somehow. Give up.
 		 */
 		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		vcpu->stat.fail_entry_exit_stat++;
 		return -EINVAL;
 	default:
 		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->stat.internal_error_exit_stat++;
 		return 0;
 	}
 }
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 1f875a8f20c4..4ef2dfe8186b 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -223,6 +223,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 	    esr_ec != ESR_ELx_EC_SVE)
 		return false;
 
+	vcpu->stat.fp_asimd_exit_stat++;
 	/* Don't handle SVE traps for non-SVE vcpus here: */
 	if (!sve_guest)
 		if (esr_ec != ESR_ELx_EC_FP_ASIMD)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 75814a02d189..d6c20d899575 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -763,6 +763,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	write_fault = kvm_is_write_fault(vcpu);
 	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
 	VM_BUG_ON(write_fault && exec_fault);
+	vcpu->stat.mabt_exit_stat++;
 
 	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
 		kvm_err("Unexpected L2 read permission error\n");
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c1fac9836af1..be13a66b50ed 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2156,6 +2156,8 @@ static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
 {
 	kvm_inject_undefined(vcpu);
+	vcpu->stat.cp14_ls_exit_stat++;
+
 	return 1;
 }
 
@@ -2326,21 +2328,29 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 
 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
 {
+	vcpu->stat.cp15_64_exit_stat++;
+
 	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
 }
 
 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
 {
+	vcpu->stat.cp15_32_exit_stat++;
+
 	return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
 }
 
 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
 {
+	vcpu->stat.cp14_64_exit_stat++;
+
 	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
 }
 
 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
 {
+	vcpu->stat.cp14_mr_exit_stat++;
+
 	return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs));
 }
 
@@ -2398,6 +2408,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
 	int ret;
 
 	trace_kvm_handle_sys_reg(esr);
+	vcpu->stat.sys64_exit_stat++;
 
 	params.is_aarch32 = false;
 	params.is_32bit = false;
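
Note: to eyeball the new counters after applying the series, the aggregated table can be read back through debugfs. The sketch below assumes the earlier vcpu_stat patch in this series exposes it as /sys/kernel/debug/kvm/vcpu_stat; the path and output format are assumptions here, not something this patch defines.

/* Minimal userspace dump of the assumed debugfs file. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/kvm/vcpu_stat", "r");
	char line[1024];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* print the table as formatted by the kernel */
	fclose(f);
	return 0;
}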