From: Nicholas Piggin <npiggin@gmail.com>
stable inclusion
from stable-v5.10.94
commit c9ffa84a3bd1e7afc1fd2a5836bf0c87ff4feb96
bugzilla: https://gitee.com/openeuler/kernel/issues/I531X9
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
[ Upstream commit 156b5371a9c2482a9ad23ec82d1a4f89a3ab430d ]
This is required in order to allow more significant differences between NMI type interrupt handlers and regular asynchronous handlers.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210130130852.2952424-20-npiggin@gmail.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
---
 arch/powerpc/kernel/traps.c      | 31 ++++++++++++++++++++++++++++++-
 arch/powerpc/perf/core-book3s.c  | 35 ++---------------------------------
 arch/powerpc/perf/core-fsl-emb.c | 25 -------------------------
 3 files changed, 32 insertions(+), 59 deletions(-)
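
For reference (a readability note below the cut line, not part of the change itself): the
net effect of this patch is that the top-level PMI handler in traps.c dispatches between
an NMI-style path and a regular asynchronous path, roughly as follows:

        void performance_monitor_exception(struct pt_regs *regs)
        {
                /* On 64-bit, a PMI taken in a soft-masked region is handled as an NMI */
                if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
                        performance_monitor_exception_nmi(regs);   /* nmi_enter()/nmi_exit() */
                else
                        performance_monitor_exception_async(regs); /* irq_enter()/irq_exit() */
        }
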
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 77dffea3d537..069d451240fa 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1922,11 +1922,40 @@ void vsx_unavailable_tm(struct pt_regs *regs)
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-void performance_monitor_exception(struct pt_regs *regs)
+static void performance_monitor_exception_nmi(struct pt_regs *regs)
+{
+        nmi_enter();
+
+        __this_cpu_inc(irq_stat.pmu_irqs);
+
+        perf_irq(regs);
+
+        nmi_exit();
+}
+
+static void performance_monitor_exception_async(struct pt_regs *regs)
 {
+        irq_enter();
+
         __this_cpu_inc(irq_stat.pmu_irqs);
 
         perf_irq(regs);
+
+        irq_exit();
+}
+
+void performance_monitor_exception(struct pt_regs *regs)
+{
+        /*
+         * On 64-bit, if perf interrupts hit in a local_irq_disable
+         * (soft-masked) region, we consider them as NMIs. This is required to
+         * prevent hash faults on user addresses when reading callchains (and
+         * looks better from an irq tracing perspective).
+         */
+        if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
+                performance_monitor_exception_nmi(regs);
+        else
+                performance_monitor_exception_async(regs);
 }
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 7bda7499d040..b5cac8ddcf5b 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -110,10 +110,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
 {
         regs->result = 0;
 }
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-        return 0;
-}
 
 static inline int siar_valid(struct pt_regs *regs)
 {
@@ -332,15 +328,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
         regs->result = use_siar;
 }
 
-/*
- * If interrupts were soft-disabled when a PMU interrupt occurs, treat
- * it as an NMI.
- */
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-        return (regs->softe & IRQS_DISABLED);
-}
-
 /*
  * On processors like P7+ that have the SIAR-Valid bit, marked instructions
  * must be sampled only if the SIAR-valid bit is set.
@@ -2254,7 +2241,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
         struct perf_event *event;
         unsigned long val[8];
         int found, active;
-        int nmi;
 
         if (cpuhw->n_limited)
                 freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
@@ -2262,18 +2248,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 
         perf_read_regs(regs);
 
-        /*
-         * If perf interrupts hit in a local_irq_disable (soft-masked) region,
-         * we consider them as NMIs. This is required to prevent hash faults on
-         * user addresses when reading callchains. See the NMI test in
-         * do_hash_page.
-         */
-        nmi = perf_intr_is_nmi(regs);
-        if (nmi)
-                nmi_enter();
-        else
-                irq_enter();
-
         /* Read all the PMCs since we'll need them a bunch of times */
         for (i = 0; i < ppmu->n_counter; ++i)
                 val[i] = read_pmc(i + 1);
@@ -2319,8 +2293,8 @@ static void __perf_event_interrupt(struct pt_regs *regs)
                         }
                 }
         }
-        if (!found && !nmi && printk_ratelimit())
-                printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
+        if (unlikely(!found) && !arch_irq_disabled_regs(regs))
+                printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");
 
         /*
          * Reset MMCR0 to its normal value. This will set PMXE and
@@ -2330,11 +2304,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
          * we get back out of this interrupt.
          */
         write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0);
-
-        if (nmi)
-                nmi_exit();
-        else
-                irq_exit();
 }
 
 static void perf_event_interrupt(struct pt_regs *regs)
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index e0e7e276bfd2..ee721f420a7b 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -31,19 +31,6 @@ static atomic_t num_events;
 /* Used to avoid races in calling reserve/release_pmc_hardware */
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
-/*
- * If interrupts were soft-disabled when a PMU interrupt occurs, treat
- * it as an NMI.
- */
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-#ifdef __powerpc64__
-        return (regs->softe & IRQS_DISABLED);
-#else
-        return 0;
-#endif
-}
-
 static void perf_event_interrupt(struct pt_regs *regs);
 
 /*
@@ -659,13 +646,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
         struct perf_event *event;
         unsigned long val;
         int found = 0;
-        int nmi;
-
-        nmi = perf_intr_is_nmi(regs);
-        if (nmi)
-                nmi_enter();
-        else
-                irq_enter();
 
         for (i = 0; i < ppmu->n_counter; ++i) {
                 event = cpuhw->event[i];
@@ -690,11 +670,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
         mtmsr(mfmsr() | MSR_PMM);
         mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
         isync();
-
-        if (nmi)
-                nmi_exit();
-        else
-                irq_exit();
 }
 
 void hw_perf_event_setup(int cpu)