[PATCH OLK-5.10 2/4] pmu: enable PMU phys IRQ injection for CVM

From: Jingxian He <hejingxian@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
CVE: N/A
------------
Enable PMU physical IRQ injection for Confidential VMs (CVMs). When a
TEC vcpu has its virtual PMU interrupt asserted, the host masks the
physical PMU IRQ on the current CPU before entering the guest and
unmasks it again once the exit has been handled. For a TEC, the PMU
overflow status is read from the pmu_ovf_status field of the TEC exit
structure instead of the emulated PMOVSSET_EL0/PMCNTENSET_EL0 registers.
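The resulting host-side flow, condensed from the hunks below (an
illustrative sketch, not the literal patch code; the surrounding vcpu
run loop is omitted):

    bool pmu_stopped = false;

    /* Before entry: the vPMU interrupt is still asserted for this
     * TEC vcpu, so mask the physical PMU IRQ on the current CPU.
     */
    if (vcpu_is_tec(vcpu) && vcpu->arch.pmu.irq_level) {
        pmu_stopped = true;
        arm_pmu_set_phys_irq(false);
    }

    /* ... enter the guest and handle its exit ... */

    /* After exit: unmask the physical PMU IRQ again. */
    if (pmu_stopped)
        arm_pmu_set_phys_irq(true);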
Signed-off-by: Jingxian He <hejingxian@huawei.com>
---
arch/arm64/include/asm/kvm_tmi.h | 5 ++++-
arch/arm64/kvm/arm.c | 24 ++++++++++++++++++++++++
arch/arm64/kvm/pmu-emul.c | 9 +++++++++
drivers/perf/arm_pmu.c | 17 +++++++++++++++++
include/linux/perf/arm_pmu.h | 3 +++
5 files changed, 57 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h
index 554b3e439..49ae4f77c 100644
--- a/arch/arm64/include/asm/kvm_tmi.h
+++ b/arch/arm64/include/asm/kvm_tmi.h
@@ -4,6 +4,7 @@
*/
#ifndef __TMM_TMI_H
#define __TMM_TMI_H
+#ifdef CONFIG_CVM_HOST
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_pgtable.h>
@@ -144,6 +145,7 @@ struct tmi_tec_exit {
    uint64_t cntp_ctl;
    uint64_t cntp_cval;
    uint64_t imm;
+    uint64_t pmu_ovf_status;
};

struct tmi_tec_run {
@@ -370,4 +372,5 @@ unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
        bool serror_pending, bool ext_dabt_pending);
-#endif
+#endif /* CONFIG_CVM_HOST */
+#endif /* __TMM_TMI_H */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 32974a10e..6790b06f9 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -40,6 +40,7 @@
#include <asm/sections.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
+#include <linux/perf/arm_pmu.h>
#endif
#include <kvm/arm_hypercalls.h>
@@ -890,6 +891,18 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
xfer_to_guest_mode_work_pending();
}
+#ifdef CONFIG_CVM_HOST
+static inline void update_pmu_phys_irq(struct kvm_vcpu *vcpu, bool *pmu_stopped)
+{
+    struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+    if (pmu->irq_level) {
+        *pmu_stopped = true;
+        arm_pmu_set_phys_irq(false); /* mask the host PMU IRQ while in the guest */
+    }
+}
+#endif
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@@ -934,6 +947,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
    ret = 1;
    run->exit_reason = KVM_EXIT_UNKNOWN;
    while (ret > 0) {
+#ifdef CONFIG_CVM_HOST
+        bool pmu_stopped = false;
+#endif
        /*
         * Check conditions before entering the guest
         */
@@ -953,6 +969,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        preempt_disable();

        kvm_pmu_flush_hwstate(vcpu);

+#ifdef CONFIG_CVM_HOST
+        if (vcpu_is_tec(vcpu))
+            update_pmu_phys_irq(vcpu, &pmu_stopped);
+#endif
        local_irq_disable();
@@ -1063,6 +1083,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        }
#endif
        preempt_enable();
+#ifdef CONFIG_CVM_HOST
+        if (pmu_stopped)
+            arm_pmu_set_phys_irq(true);
+#endif

        /*
         * The ARMv8 architecture doesn't give the hypervisor
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 9fdc76c6d..00aa9ebe6 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -13,6 +13,7 @@
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
+#include <asm/kvm_tmi.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
@@ -370,6 +371,14 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
    u64 reg = 0;

+#ifdef CONFIG_CVM_HOST
+    if (vcpu_is_tec(vcpu)) {
+        struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+        reg = run->tec_exit.pmu_ovf_status;
+        return reg;
+    }
+#endif
+
    if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
        reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
        reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 4ef8aee84..743f52d94 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -797,6 +797,23 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
    return 0;
}

+#ifdef CONFIG_CVM_HOST
+void arm_pmu_set_phys_irq(bool enable)
+{
+    int cpu = get_cpu();
+    struct arm_pmu *pmu = per_cpu(cpu_armpmu, cpu);
+    int irq;
+
+    irq = armpmu_get_cpu_irq(pmu, cpu);
+    if (irq && !enable)
+        per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
+    else if (irq && enable)
+        per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
+
+    put_cpu();
+}
+#endif
+
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 6fd58c8f9..c7a35d321 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -189,6 +189,9 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif

+#ifdef CONFIG_CVM_HOST
+void arm_pmu_set_phys_irq(bool enable);
+#endif
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
--
2.33.0