From: Yipeng Zou <zouyipeng@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6BO2R
CVE: NA
-------------------------------
Since CONFIG_GENERIC_PENDING_IRQ was introduced for gic-v3-its, the kabi has changed (pending_mask was added to struct irq_desc).

Introduce CONFIG_GENERIC_PENDING_IRQ_FIX_KABI to fix this.

The main idea is to use a static array (irq_pending_mask), indexed by irq number, to replace pending_mask in struct irq_desc.
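
For illustration only (not part of the patch), a minimal userspace sketch of the idea: the descriptor layout stays untouched, and a global array indexed by irq number holds the pending masks behind the same accessor used in the diff below. The toy cpumask type and the IRQ_BITMAP_BITS value here are stand-ins, not the kernel's definitions.

  /*
   * Simplified userspace model of the kabi-preserving approach: instead of
   * a per-descriptor pending_mask field (which would change the layout of
   * struct irq_desc), a global array indexed by irq number holds the masks.
   * Names mirror the patch; the types below are stand-ins.
   */
  #include <stdio.h>

  #define IRQ_BITMAP_BITS 64                /* stand-in for the kernel's value */

  struct cpumask { unsigned long bits; };   /* toy cpumask */

  struct irq_desc {                         /* layout stays unchanged (kabi) */
  	unsigned int irq;
  	/* no pending_mask field here */
  };

  /* One mask per possible irq, replacing desc->pending_mask. */
  static struct cpumask irq_pending_mask[IRQ_BITMAP_BITS];

  static unsigned int irq_desc_get_irq(const struct irq_desc *desc)
  {
  	return desc->irq;
  }

  /* All users go through this accessor instead of touching desc directly. */
  static struct cpumask *irq_desc_get_pending_mask(const struct irq_desc *desc)
  {
  	return &irq_pending_mask[irq_desc_get_irq(desc)];
  }

  int main(void)
  {
  	struct irq_desc desc = { .irq = 42 };

  	irq_pending_mask[0].bits = 0;                   /* keep the array referenced */
  	irq_desc_get_pending_mask(&desc)->bits = 0x5;   /* pending on CPUs 0 and 2 */
  	printf("pending mask of irq %u: %#lx\n", desc.irq,
  	       irq_desc_get_pending_mask(&desc)->bits);
  	return 0;
  }

Because the masks are indexed by irq number rather than hung off the descriptor, struct irq_desc keeps its old size and field offsets, which is exactly what the kabi check cares about.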
Signed-off-by: Yipeng Zou <zouyipeng@huawei.com>
Reviewed-by: Liao Chang <liaochang1@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 include/linux/irqdesc.h |  3 ++-
 kernel/irq/Kconfig      |  6 ++++++
 kernel/irq/debugfs.c    |  2 +-
 kernel/irq/internals.h  | 19 +++++++++++++++++++
 kernel/irq/irqdesc.c    |  8 +++++---
 kernel/irq/manage.c     |  6 ++++++
 kernel/irq/migration.c  | 13 ++++++++-----
 kernel/irq/proc.c       |  2 +-
 8 files changed, 48 insertions(+), 11 deletions(-)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 8140d8ca5b83..b27701efbe6c 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -77,7 +77,8 @@ struct irq_desc {
 #ifdef CONFIG_SMP
 	const struct cpumask	*affinity_hint;
 	struct irq_affinity_notify *affinity_notify;
-#ifdef CONFIG_GENERIC_PENDING_IRQ
+#if defined(CONFIG_GENERIC_PENDING_IRQ) && \
+	!defined(CONFIG_GENERIC_PENDING_IRQ_FIX_KABI)
 	cpumask_var_t		pending_mask;
 #endif
 #endif
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index eebbced84e44..743afe22fdfc 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -138,6 +138,12 @@ config GENERIC_IRQ_DEBUGFS
 
 	  If you don't know what to do here, say N.
 
+# Support for delayed migration from interrupt context without kabi modification
+config GENERIC_PENDING_IRQ_FIX_KABI
+	bool "Support for delayed migration from interrupt context without kabi modification"
+	depends on GENERIC_PENDING_IRQ
+	default n
+
 endmenu
 
 config GENERIC_IRQ_MULTI_HANDLER
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 216db115b68a..dd93b740a9e7 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -39,7 +39,7 @@ static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
 	seq_printf(m, "effectiv: %*pbl\n", cpumask_pr_args(msk));
 #endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	msk = desc->pending_mask;
+	msk = irq_desc_get_pending_mask(desc);
 	seq_printf(m, "pending:  %*pbl\n", cpumask_pr_args(msk));
 #endif
 }
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index f29d6982b3ce..bf7e5b434e1c 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -410,6 +410,7 @@ static inline bool irq_move_pending(struct irq_data *data)
 {
 	return irqd_is_setaffinity_pending(data);
 }
+#ifndef CONFIG_GENERIC_PENDING_IRQ_FIX_KABI
 static inline void
 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
 {
@@ -424,6 +425,24 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
 {
 	return desc->pending_mask;
 }
+#else
+extern struct cpumask irq_pending_mask[IRQ_BITMAP_BITS];
+
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+	cpumask_copy(&irq_pending_mask[irq_desc_get_irq(desc)], mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+	cpumask_copy(mask, &irq_pending_mask[irq_desc_get_irq(desc)]);
+}
+static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
+{
+	return &irq_pending_mask[irq_desc_get_irq(desc)];
+}
+#endif
 static inline bool handle_enforce_irqctx(struct irq_data *data)
 {
 	return irqd_is_handle_enforce_irqctx(data);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index c0248d4dde19..1ce4b701f26a 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -66,7 +66,8 @@ static int alloc_masks(struct irq_desc *desc, int node)
 	}
 #endif
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
+#if defined(CONFIG_GENERIC_PENDING_IRQ) && \
+	!defined(CONFIG_GENERIC_PENDING_IRQ_FIX_KABI)
 	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 		free_cpumask_var(desc->irq_common_data.effective_affinity);
@@ -86,7 +87,7 @@ static void desc_smp_init(struct irq_desc *desc, int node,
 	cpumask_copy(desc->irq_common_data.affinity, affinity);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_clear(desc->pending_mask);
+	cpumask_clear(irq_desc_get_pending_mask(desc));
 #endif
 #ifdef CONFIG_NUMA
 	desc->irq_common_data.node = node;
@@ -361,7 +362,8 @@ static void delete_irq_desc(unsigned int irq)
 #ifdef CONFIG_SMP
 static void free_masks(struct irq_desc *desc)
 {
-#ifdef CONFIG_GENERIC_PENDING_IRQ
+#if defined(CONFIG_GENERIC_PENDING_IRQ) && \
+	!defined(CONFIG_GENERIC_PENDING_IRQ_FIX_KABI)
 	free_cpumask_var(desc->pending_mask);
 #endif
 	free_cpumask_var(desc->irq_common_data.affinity);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d3ee0b8bea5f..213f88f7fdfb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -23,6 +23,12 @@
 
 #include "internals.h"
 
+#ifdef CONFIG_GENERIC_PENDING_IRQ_FIX_KABI
+struct cpumask irq_pending_mask[IRQ_BITMAP_BITS] = {
+	[0 ... IRQ_BITMAP_BITS - 1] = { CPU_BITS_NONE }
+};
+#endif
+
 #ifdef CONFIG_IRQ_FORCED_THREADING
 __read_mostly bool force_irqthreads;
 EXPORT_SYMBOL_GPL(force_irqthreads);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bcb61ee69c20..01b2669ed29f 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -26,7 +26,8 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
 	 * The outgoing CPU might be the last online target in a pending
 	 * interrupt move. If that's the case clear the pending move bit.
 	 */
-	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
+	if (cpumask_any_and(irq_desc_get_pending_mask(desc),
+			    cpu_online_mask) >= nr_cpu_ids) {
 		irqd_clr_move_pending(data);
 		return false;
 	}
@@ -54,7 +55,7 @@ void irq_move_masked_irq(struct irq_data *idata)
 		return;
 	}
 
-	if (unlikely(cpumask_empty(desc->pending_mask)))
+	if (unlikely(cpumask_empty(irq_desc_get_pending_mask(desc))))
 		return;
 
 	if (!chip->irq_set_affinity)
@@ -74,10 +75,12 @@ void irq_move_masked_irq(struct irq_data *idata)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
+	if (cpumask_any_and(irq_desc_get_pending_mask(desc),
+			    cpu_online_mask) < nr_cpu_ids) {
 		int ret;
 
-		ret = irq_do_set_affinity(data, desc->pending_mask, false);
+		ret = irq_do_set_affinity(data, irq_desc_get_pending_mask(desc),
+					  false);
 		/*
 		 * If the there is a cleanup pending in the underlying
 		 * vector management, reschedule the move for the next
@@ -88,7 +91,7 @@ void irq_move_masked_irq(struct irq_data *idata)
 			return;
 		}
 	}
-	cpumask_clear(desc->pending_mask);
+	cpumask_clear(irq_desc_get_pending_mask(desc));
 }
 
 void __irq_move_irq(struct irq_data *idata)
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index e8c655b7a430..a914f1772c62 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -54,7 +54,7 @@ static int show_irq_affinity(int type, struct seq_file *m)
 		mask = desc->irq_common_data.affinity;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 		if (irqd_is_setaffinity_pending(&desc->irq_data))
-			mask = desc->pending_mask;
+			mask = irq_desc_get_pending_mask(desc);
 #endif
 		break;
 	case EFFECTIVE: