From: Marc Zyngier <maz@kernel.org>
stable inclusion
from stable-v5.8-rc1
commit 2f13ff1d1d5c0257c97ea76b86a2d9c99c44a4b9
category: perf
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I6UVFG
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=l...
--------------------------------
In order to improve the distribution of LPIs among CPUs, let's start by tracking the number of LPIs assigned to CPUs, both for managed and non-managed interrupts (as separate counters). A standalone sketch of this counter pattern follows the diffstat below.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/20200515165752.121296-2-maz@kernel.org
Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Zhang Changzhong <zhangchangzhong@huawei.com>
---
 drivers/irqchip/irq-gic-v3-its.c | 50 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 47 insertions(+), 3 deletions(-)
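For context: the pattern introduced below is one pair of counters per CPU. Atomics are used, presumably, because affinity changes for different interrupts can touch the same CPU's counters concurrently without any common lock held. A minimal, self-contained sketch of the pattern (illustrative only, not part of the patch; the sketch_ prefix marks hypothetical names):

/*
 * Sketch of the per-CPU LPI accounting pattern (illustrative only).
 * One pair of atomic counters per CPU; managed and unmanaged
 * interrupts are accounted separately.
 */
#include <linux/atomic.h>
#include <linux/irq.h>
#include <linux/percpu.h>

struct sketch_cpu_lpi_count {
	atomic_t	managed;
	atomic_t	unmanaged;
};

static DEFINE_PER_CPU(struct sketch_cpu_lpi_count, sketch_cpu_lpi_count);

/* Account one LPI to @cpu, in the counter matching its class. */
static void sketch_inc_lpi_count(struct irq_data *d, int cpu)
{
	if (irqd_affinity_is_managed(d))
		atomic_inc(&per_cpu_ptr(&sketch_cpu_lpi_count, cpu)->managed);
	else
		atomic_inc(&per_cpu_ptr(&sketch_cpu_lpi_count, cpu)->unmanaged);
}

The decrement side is symmetrical (atomic_dec), and the accounting contract in the patch is: increment when an LPI is bound to a CPU (activate, successful set_affinity), decrement when it leaves (deactivate, start of set_affinity), restoring the previous CPU's count on the error path.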
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 15a0292e..ffcd7e2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -176,6 +176,13 @@ static struct {
 	int			next_victim;
 } vpe_proxy;
 
+struct cpu_lpi_count {
+	atomic_t	managed;
+	atomic_t	unmanaged;
+};
+
+static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
+
 static LIST_HEAD(its_nodes);
 static DEFINE_RAW_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
@@ -1162,6 +1169,30 @@ static void its_unmask_irq(struct irq_data *d)
 	lpi_update_config(d, 0, LPI_PROP_ENABLED);
 }
 
+static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+
+	return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_inc_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+	else
+		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_dec_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+	else
+		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 			    bool force)
 {
@@ -1171,17 +1202,21 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_collection *target_col;
 	u32 id = its_get_event_id(d);
+	int prev_cpu;
 
 	/* A forwarded interrupt should use irq_set_vcpu_affinity */
 	if (irqd_is_forwarded_to_vcpu(d))
 		return -EINVAL;
 
+	prev_cpu = its_dev->event_map.col_map[id];
+	its_dec_lpi_count(d, prev_cpu);
+
 	/* lpi cannot be routed to a redistributor that is on a foreign node */
 	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
 		if (its_dev->its->numa_node >= 0) {
 			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 			if (!cpumask_intersects(mask_val, cpu_mask))
-				return -EINVAL;
+				goto err;
 		}
 	}
 
@@ -1194,17 +1229,24 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	}
 
 	if (cpu >= max_cpu)
-		return -EINVAL;
+		goto err;
 
 	/* don't set the affinity when the target cpu is same as current one */
-	if (cpu != its_dev->event_map.col_map[id]) {
+	if (cpu != prev_cpu) {
 		target_col = &its_dev->its->collections[cpu];
 		its_send_movi(its_dev, target_col, id);
 		its_dev->event_map.col_map[id] = cpu;
 		irq_data_update_effective_affinity(d, cpumask_of(cpu));
 	}
 
+	its_inc_lpi_count(d, cpu);
+
 	return IRQ_SET_MASK_OK_DONE;
+
+err:
+	its_inc_lpi_count(d, prev_cpu);
+	return -EINVAL;
+
 }
 
 static u64 its_irq_get_msi_base(struct its_device *its_dev)
@@ -2897,6 +2939,7 @@ static int its_irq_domain_activate(struct irq_domain *domain,
 		cpu = cpumask_first(cpu_online_mask);
 	}
 
+	its_inc_lpi_count(d, cpu);
 	its_dev->event_map.col_map[event] = cpu;
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -2911,6 +2954,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain,
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	u32 event = its_get_event_id(d);
 
+	its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
 	/* Stop the delivery of interrupts */
 	its_send_discard(its_dev, event);
 }
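Note that its_read_lpi_count() is marked __maybe_unused: this patch only introduces the bookkeeping and nothing reads the counters yet; a follow-up patch in the series is presumably the consumer, using the counts to pick a target CPU. A sketch of what such a consumer could look like in the same file, reusing its_read_lpi_count() from the hunk above (the function name and the exact selection policy here are hypothetical, not part of this patch):

/*
 * Hypothetical consumer of the counters: among the permitted CPUs,
 * pick the one currently carrying the fewest LPIs of the relevant
 * class (managed vs unmanaged, as decided by its_read_lpi_count()).
 */
static int sketch_pick_least_loaded(struct irq_data *d,
				    const struct cpumask *cpu_mask)
{
	unsigned int count = UINT_MAX;
	int cpu, best_cpu = -1;

	for_each_cpu(cpu, cpu_mask) {
		unsigned int this_count = its_read_lpi_count(d, cpu);

		if (this_count < count) {
			count = this_count;
			best_cpu = cpu;
		}
	}

	return best_cpu;
}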