
From: Xiang Chen <chenxiang66@hisilicon.com> virt inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IBPH85 ------------------------------------------------------------------------ For direct IPIv mode, the vpe id is needed when MPIDR is reset, so pre-allocate the vpe id before reset_mpidr() runs. Signed-off-by: Xiang Chen <chenxiang66@hisilicon.com> Signed-off-by: Nianyao Tang <tangnianyao@huawei.com> Signed-off-by: Jinqian Yang <yangjinqian1@huawei.com> --- arch/arm64/kvm/sys_regs.c | 18 ++++++++++++-- arch/arm64/kvm/vgic/vgic-init.c | 40 ++++++++++++++++++++++++++++++ drivers/irqchip/irq-gic-v3-its.c | 18 ++++++++++---- drivers/irqchip/irq-gic-v3.c | 3 +++ include/kvm/arm_vgic.h | 2 ++ include/linux/irqchip/arm-gic-v4.h | 1 + 6 files changed, 75 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 4d9bf04c1023..eb6e99e91951 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -712,17 +712,31 @@ static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) } extern struct static_key_false ipiv_enable; +extern struct static_key_false ipiv_direct; static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { + struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; u64 mpidr; if (static_branch_unlikely(&ipiv_enable)) { /* + * For direct ipiv mode, use vpeid as aff2/aff3 + * For indirect ipiv mode, use vcpu_id to index vpeid * To avoid sending multi-SGIs in guest OS, make aff1/aff2 unique */ - mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(1); - mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(2); + if (static_branch_unlikely(&ipiv_direct)) { + u64 vpe_id_aff3, vpe_id_aff2; + + vpe_id_aff2 = (vpe->vpe_id >> 8) & 0xff; + vpe_id_aff3 = (vpe->vpe_id & 0xff); + + mpidr = vpe_id_aff2 << MPIDR_LEVEL_SHIFT(2); + mpidr |= vpe_id_aff3 << MPIDR_LEVEL_SHIFT(3); + } else { + mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(1); + mpidr |= ((vcpu->vcpu_id >> 
4) & 0xff) << MPIDR_LEVEL_SHIFT(2); + } } else { /* * Map the vcpu_id into the first three affinity level fields of diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 113accd6cd0b..1be6c9a4970a 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -694,3 +694,43 @@ int kvm_vgic_hyp_init(void) acpi_unregister_gsi(18); return ret; } + +extern int its_vpe_id_alloc(void); +extern void its_vpe_id_free(u16 id); +int kvm_vgic_vpe_id_alloc(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + int vpe_id; + + if (vgic_cpu->vgic_v3.its_vpe.vpe_id_allocated) { + kvm_err("[%s]vpe_id already allocated\n", __func__); + return -1; + } + + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) { + kvm_err("[%s]alloc vpe id fail: vpe_id=%d\n", __func__, vpe_id); + return vpe_id; + } + + vgic_cpu->vgic_v3.its_vpe.vpe_id = vpe_id; + vgic_cpu->vgic_v3.its_vpe.vpe_id_allocated = true; + + return 0; +} + +int kvm_vgic_vpe_id_free(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + int vpe_id; + + vpe_id = vgic_cpu->vgic_v3.its_vpe.vpe_id; + + if (!vgic_cpu->vgic_v3.its_vpe.vpe_id_allocated) + return 0; + + its_vpe_id_free(vpe_id); + vgic_cpu->vgic_v3.its_vpe.vpe_id_allocated = false; + + return 0; +} diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d4fb4f57133c..a607b6373c59 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -4997,15 +4997,17 @@ static const struct irq_domain_ops its_sgi_domain_ops = { .deactivate = its_sgi_irq_domain_deactivate, }; -static int its_vpe_id_alloc(void) +int its_vpe_id_alloc(void) { return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); } +EXPORT_SYMBOL(its_vpe_id_alloc); -static void its_vpe_id_free(u16 id) +void its_vpe_id_free(u16 id) { ida_simple_remove(&its_vpeid_ida, id); } +EXPORT_SYMBOL(its_vpe_id_free); static int its_vpe_init(struct its_vpe *vpe) { 
@@ -5013,9 +5015,13 @@ static int its_vpe_init(struct its_vpe *vpe) int vpe_id; /* Allocate vpe_id */ - vpe_id = its_vpe_id_alloc(); - if (vpe_id < 0) - return vpe_id; + if (!vpe->vpe_id_allocated) { + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) + return vpe_id; + } else { + vpe_id = vpe->vpe_id; + } /* Allocate VPT */ vpt_page = its_allocate_pending_table(GFP_KERNEL); @@ -5032,6 +5038,7 @@ static int its_vpe_init(struct its_vpe *vpe) raw_spin_lock_init(&vpe->vpe_lock); vpe->vpe_id = vpe_id; + vpe->vpe_id_allocated = true; vpe->vpt_page = vpt_page; atomic_set(&vpe->vmapp_count, 0); if (!gic_rdists->has_rvpeid) @@ -5044,6 +5051,7 @@ static void its_vpe_teardown(struct its_vpe *vpe) { its_vpe_db_proxy_unmap(vpe); its_vpe_id_free(vpe->vpe_id); + vpe->vpe_id_allocated = false; its_free_pending_table(vpe->vpt_page); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 7a64d77be181..d1c2e2315e42 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -114,6 +114,9 @@ EXPORT_SYMBOL(gic_nonsecure_priorities); DEFINE_STATIC_KEY_FALSE(ipiv_enable); EXPORT_SYMBOL(ipiv_enable); +DEFINE_STATIC_KEY_FALSE(ipiv_direct); +EXPORT_SYMBOL(ipiv_direct); + /* * When the Non-secure world has access to group 0 interrupts (as a * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 0b734d6f3d21..460d226450af 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -454,6 +454,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); int kvm_vgic_map_resources(struct kvm *kvm); int kvm_vgic_hyp_init(void); void kvm_vgic_init_cpu_hardware(void); +int kvm_vgic_vpe_id_alloc(struct kvm_vcpu *vcpu); +int kvm_vgic_vpe_id_free(struct kvm_vcpu *vcpu); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, bool level, void *owner); diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 
630b18f455c0..4ca3c48cf399 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -95,6 +95,7 @@ struct its_vpe { u16 vpe_id; /* Pending VLPIs on schedule out? */ bool pending_last; + KABI_EXTEND(bool vpe_id_allocated) }; /* -- 2.33.0