From: Jingxian He <hejingxian@huawei.com>
virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAGYKI
--------------------------------
Enable PMU physical interrupt injection for confidential VMs.

When a TEC vcpu has its virtual PMU interrupt line raised, mask the
physical PMU interrupt on the host CPU before entering the guest and
unmask it again after the vcpu has exited. For TEC vcpus the PMU
overflow status is taken from the tec_exit state reported by the TMM
rather than from the emulated PMU registers.
Signed-off-by: Jingxian He <hejingxian@huawei.com>
---
 arch/arm64/kvm/arm.c         | 24 ++++++++++++++++++++++++
 arch/arm64/kvm/pmu-emul.c    |  9 +++++++++
 drivers/perf/arm_pmu.c       | 17 +++++++++++++++++
 include/linux/perf/arm_pmu.h |  4 ++++
 4 files changed, 54 insertions(+)
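A condensed sketch of the resulting vcpu-run flow, for review
convenience (identifiers are the ones used in the diff below;
surrounding code, locking and error paths are omitted, so this is an
illustration rather than the literal kernel code):

	int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)	/* condensed */
	{
		while (ret > 0) {
			bool pmu_stopped = false;

			kvm_pmu_flush_hwstate(vcpu);
			/* TEC vcpu with the vPMU line raised: mask the host PMU irq */
			if (vcpu_is_tec(vcpu))
				update_pmu_phys_irq(vcpu, &pmu_stopped);

			/* ... enter the guest and handle its exit ... */

			/* back on the host: unmask the PMU irq if we masked it */
			if (pmu_stopped)
				arm_pmu_set_phys_irq(true);
		}
	}

arm_pmu_set_phys_irq() runs under get_cpu()/put_cpu(), so the
mask/unmask pair always applies to the per-cpu PMU interrupt of the
CPU that runs the vcpu.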
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index cff0affca..24f1d898c 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -42,6 +42,7 @@
 #include <asm/sections.h>
 #ifdef CONFIG_HISI_VIRTCCA_HOST
 #include <asm/kvm_tmi.h>
+#include <linux/perf/arm_pmu.h>
 #endif
 
 #include <kvm/arm_hypercalls.h>
@@ -1030,6 +1031,18 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
 			xfer_to_guest_mode_work_pending();
 }
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+static inline void update_pmu_phys_irq(struct kvm_vcpu *vcpu, bool *pmu_stopped)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+	if (pmu->irq_level) {
+		*pmu_stopped = true;
+		arm_pmu_set_phys_irq(false);
+	}
+}
+#endif
+
 /*
  * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
  * the vCPU is running.
@@ -1082,6 +1095,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->flags = 0;
 	while (ret > 0) {
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+		bool pmu_stopped = false;
+#endif
 		/*
 		 * Check conditions before entering the guest
 		 */
@@ -1109,6 +1125,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
 
 		kvm_pmu_flush_hwstate(vcpu);
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+		if (vcpu_is_tec(vcpu))
+			update_pmu_phys_irq(vcpu, &pmu_stopped);
+#endif
 
 		local_irq_disable();
@@ -1219,6 +1239,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 #endif
 		preempt_enable();
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+		if (pmu_stopped)
+			arm_pmu_set_phys_irq(true);
+#endif
 		/*
 		 * The ARMv8 architecture doesn't give the hypervisor
 		 * a mechanism to prevent a guest from dropping to AArch32 EL0
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 6b066e04d..cdc023f84 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -15,6 +15,7 @@
 #include <kvm/arm_pmu.h>
 #include <kvm/arm_vgic.h>
 #include <asm/arm_pmuv3.h>
+#include <asm/kvm_tmi.h>
 
 #define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)
 
@@ -324,6 +325,14 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
 	u64 reg = 0;
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+	if (vcpu_is_tec(vcpu)) {
+		struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+		reg = run->tec_exit.pmu_ovf_status;
+		return reg;
+	}
+#endif
 	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index d712a19e4..3f6a99866 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -736,6 +736,23 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
 	return 0;
 }
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+void arm_pmu_set_phys_irq(bool enable)
+{
+	int cpu = get_cpu();
+	struct arm_pmu *pmu = per_cpu(cpu_armpmu, cpu);
+	int irq;
+
+	irq = armpmu_get_cpu_irq(pmu, cpu);
+	if (irq && !enable)
+		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
+	else if (irq && enable)
+		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
+
+	put_cpu();
+}
+#endif
+
 #ifdef CONFIG_CPU_PM
 static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
 {
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 143fbc10e..c50f29236 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -167,6 +167,10 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
+#ifdef CONFIG_HISI_VIRTCCA_HOST
+void arm_pmu_set_phys_irq(bool enable);
+#endif
+
 #ifdef CONFIG_KVM
 void kvm_host_pmu_init(struct arm_pmu *pmu);
 #else