From: wanghaibin <wanghaibin.wang@huawei.com>
virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8URKX
CVE: NA
------------------------------------------------------------------
Plumb the configuration of vtimer interrupts into the first VCPU run.
Guest restore also benefits from this: by configuring the vPPIs at first run, we can move the restored PPIs to the HW if that's what the HW allows us to do.
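For context, the first-run plumbing this relies on looks roughly like the sketch below (a simplified illustration, not part of this patch; first_vcpu_run() is a stand-in name for the existing first-run hook):

	static int first_vcpu_run(struct kvm_vcpu *vcpu)
	{
		struct kvm *kvm = vcpu->kvm;
		int ret;

		if (likely(irqchip_in_kernel(kvm))) {
			/*
			 * kvm_vgic_map_resources() ends up in
			 * vgic_v3_map_resources() on a GICv3 host, which
			 * now also calls vgic_v4_configure_vtimer() when
			 * the HW supports vtimer irqbypass.
			 */
			ret = kvm_vgic_map_resources(kvm);
			if (ret)
				return ret;
		}

		/*
		 * Any vtimer PPI state restored by userspace before this
		 * point has been pushed into the vPE, so the guest resumes
		 * with a HW-backed vtimer interrupt.
		 */
		return 0;
	}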
Signed-off-by: wanghaibin <wanghaibin.wang@huawei.com>
Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Signed-off-by: Dongxu Sun <sundongxu3@huawei.com>
---
 arch/arm64/kvm/vgic/vgic-v3.c |  3 +++
 arch/arm64/kvm/vgic/vgic-v4.c | 46 +++++++++++++++++++++++++++++++++++
 arch/arm64/kvm/vgic/vgic.h    |  1 +
 3 files changed, 50 insertions(+)

diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 72c7cf82416d..f390efd51c5e 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -571,6 +571,9 @@ int vgic_v3_map_resources(struct kvm *kvm)
 	if (kvm_vgic_global_state.has_gicv4_1)
 		vgic_v4_configure_vsgis(kvm);
 
+	if (kvm_vgic_vtimer_irqbypass_support())
+		vgic_v4_configure_vtimer(kvm);
+
 	return 0;
 }
 
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 339a55194b2c..f59c43410cf6 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -204,6 +204,52 @@ void vgic_v4_configure_vsgis(struct kvm *kvm)
 	kvm_arm_resume_guest(kvm);
 }
 
+static void vgic_v4_enable_vtimer(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vtimer_info *vtimer = &vgic_cpu->vtimer;
+	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+	struct vgic_irq *irq;
+	struct irq_desc *desc;
+	int ret;
+
+	if (!vgic_cpu->vtimer_irqbypass)
+		return;
+
+	irq = vgic_get_irq(vcpu->kvm, vcpu, vtimer->intid);
+	irq->host_irq = irq_find_mapping(vpe->sgi_domain, vtimer->intid);
+
+	/* Transfer the full irq state to the vPE */
+	vgic_v4_sync_sgi_config(vpe, irq);
+	desc = irq_to_desc(irq->host_irq);
+	ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
+				      false);
+	if (!WARN_ON(ret)) {
+		/* Transfer pending state */
+		ret = irq_set_irqchip_state(irq->host_irq,
+					    IRQCHIP_STATE_PENDING,
+					    irq->pending_latch);
+		WARN_ON(ret);
+		irq->pending_latch = false;
+
+		/* Transfer active state */
+		vtimer->set_active_stat(vcpu, irq->intid, irq->active);
+		irq->active = false;
+	}
+
+	vgic_put_irq(vcpu->kvm, irq);
+}
+
+/* Must be called with the kvm lock held */
+void vgic_v4_configure_vtimer(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		vgic_v4_enable_vtimer(vcpu);
+}
+
 /*
  * Must be called with GICv4.1 and the vPE unmapped, which
  * indicates the invalidation of any VPT caches associated
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index e25b0a8b9870..ae08a15d5b7e 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -351,5 +351,6 @@ void vgic_v4_teardown(struct kvm *kvm);
 void vgic_v4_configure_vsgis(struct kvm *kvm);
 void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
 int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
+void vgic_v4_configure_vtimer(struct kvm *kvm);
 
 #endif