From: Yipeng Zou <zouyipeng@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6WPFT
CVE: NA
--------------------------------
This reverts commit 16073a1932c77985a677bec41333218d7eb7ae5f.
Signed-off-by: Yipeng Zou <zouyipeng@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 include/linux/irqdesc.h |  3 +--
 kernel/irq/Kconfig      |  6 ------
 kernel/irq/debugfs.c    |  2 +-
 kernel/irq/internals.h  | 19 -------------------
 kernel/irq/irqdesc.c    |  8 +++-----
 kernel/irq/manage.c     |  6 ------
 kernel/irq/migration.c  | 13 +++++--------
 kernel/irq/proc.c       |  2 +-
 8 files changed, 11 insertions(+), 48 deletions(-)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index b27701efbe6c..8140d8ca5b83 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -77,8 +77,7 @@ struct irq_desc { #ifdef CONFIG_SMP const struct cpumask *affinity_hint; struct irq_affinity_notify *affinity_notify; -#if defined(CONFIG_GENERIC_PENDING_IRQ) && \ - !defined(CONFIG_GENERIC_PENDING_IRQ_FIX_KABI) +#ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_var_t pending_mask; #endif #endif diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 0beb81d2dcbe..14a85d0161ea 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -136,12 +136,6 @@ config GENERIC_IRQ_DEBUGFS
If you don't know what to do here, say N.
-# Support for delayed migration from interrupt context without kabi modification -config GENERIC_PENDING_IRQ_FIX_KABI - bool "Support for delayed migration from interrupt context without kabi modification " - depends on GENERIC_PENDING_IRQ - default n - endmenu
config GENERIC_IRQ_MULTI_HANDLER diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index dd93b740a9e7..216db115b68a 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -39,7 +39,7 @@ static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) seq_printf(m, "effectiv: %*pbl\n", cpumask_pr_args(msk)); #endif #ifdef CONFIG_GENERIC_PENDING_IRQ - msk = irq_desc_get_pending_mask(desc); + msk = desc->pending_mask; seq_printf(m, "pending: %*pbl\n", cpumask_pr_args(msk)); #endif } diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index bf7e5b434e1c..f29d6982b3ce 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -410,7 +410,6 @@ static inline bool irq_move_pending(struct irq_data *data) { return irqd_is_setaffinity_pending(data); } -#ifndef CONFIG_GENERIC_PENDING_IRQ_FIX_KABI static inline void irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { @@ -425,24 +424,6 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) { return desc->pending_mask; } -#else -extern struct cpumask irq_pending_mask[IRQ_BITMAP_BITS]; - -static inline void -irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) -{ - cpumask_copy(&irq_pending_mask[irq_desc_get_irq(desc)], mask); -} -static inline void -irq_get_pending(struct cpumask *mask, struct irq_desc *desc) -{ - cpumask_copy(mask, &irq_pending_mask[irq_desc_get_irq(desc)]); -} -static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) -{ - return &irq_pending_mask[irq_desc_get_irq(desc)]; -} -#endif static inline bool handle_enforce_irqctx(struct irq_data *data) { return irqd_is_handle_enforce_irqctx(data); diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 1ce4b701f26a..c0248d4dde19 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -66,8 +66,7 @@ static int alloc_masks(struct irq_desc *desc, int node) } #endif
-#if defined(CONFIG_GENERIC_PENDING_IRQ) && \ - !defined(CONFIG_GENERIC_PENDING_IRQ_FIX_KABI) +#ifdef CONFIG_GENERIC_PENDING_IRQ if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) { #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK free_cpumask_var(desc->irq_common_data.effective_affinity); @@ -87,7 +86,7 @@ static void desc_smp_init(struct irq_desc *desc, int node, cpumask_copy(desc->irq_common_data.affinity, affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_clear(irq_desc_get_pending_mask(desc)); + cpumask_clear(desc->pending_mask); #endif #ifdef CONFIG_NUMA desc->irq_common_data.node = node; @@ -362,8 +361,7 @@ static void delete_irq_desc(unsigned int irq) #ifdef CONFIG_SMP static void free_masks(struct irq_desc *desc) { -#if defined(CONFIG_GENERIC_PENDING_IRQ) && \ - !defined(CONFIG_GENERIC_PENDING_IRQ_FIX_KABI) +#ifdef CONFIG_GENERIC_PENDING_IRQ free_cpumask_var(desc->pending_mask); #endif free_cpumask_var(desc->irq_common_data.affinity); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index fc180f45f3a0..163712c76520 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -23,12 +23,6 @@
#include "internals.h"
-#ifdef CONFIG_GENERIC_PENDING_IRQ_FIX_KABI -struct cpumask irq_pending_mask[IRQ_BITMAP_BITS] = { - [0 ... IRQ_BITMAP_BITS - 1] = { CPU_BITS_NONE } -}; -#endif - #ifdef CONFIG_IRQ_FORCED_THREADING __read_mostly bool force_irqthreads; EXPORT_SYMBOL_GPL(force_irqthreads); diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 1ca2fbf5c32d..980ad6204cad 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -26,8 +26,7 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear) * The outgoing CPU might be the last online target in a pending * interrupt move. If that's the case clear the pending move bit. */ - if (cpumask_any_and(irq_desc_get_pending_mask(desc), - cpu_online_mask) >= nr_cpu_ids) { + if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) { irqd_clr_move_pending(data); return false; } @@ -55,7 +54,7 @@ void irq_move_masked_irq(struct irq_data *idata) return; }
- if (unlikely(cpumask_empty(irq_desc_get_pending_mask(desc)))) + if (unlikely(cpumask_empty(desc->pending_mask))) return;
if (!chip->irq_set_affinity) @@ -75,12 +74,10 @@ void irq_move_masked_irq(struct irq_data *idata) * For correct operation this depends on the caller * masking the irqs. */ - if (cpumask_any_and(irq_desc_get_pending_mask(desc), - cpu_online_mask) < nr_cpu_ids) { + if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) { int ret;
- ret = irq_do_set_affinity(data, irq_desc_get_pending_mask(desc), - false); + ret = irq_do_set_affinity(data, desc->pending_mask, false); /* * If the there is a cleanup pending in the underlying * vector management, reschedule the move for the next @@ -91,7 +88,7 @@ void irq_move_masked_irq(struct irq_data *idata) return; } } - cpumask_clear(irq_desc_get_pending_mask(desc)); + cpumask_clear(desc->pending_mask); }
void __irq_move_irq(struct irq_data *idata) diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 8bd25bba99b1..056fd6dd7319 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -54,7 +54,7 @@ static int show_irq_affinity(int type, struct seq_file *m) mask = desc->irq_common_data.affinity; #ifdef CONFIG_GENERIC_PENDING_IRQ if (irqd_is_setaffinity_pending(&desc->irq_data)) - mask = irq_desc_get_pending_mask(desc); + mask = desc->pending_mask; #endif break; case EFFECTIVE: