
From: Kan Liang <kan.liang@linux.intel.com>

mainline inclusion
from mainline-v4.20-rc1
commit ba12d20edc5caf9835006d8f3efd4ed18465c75b
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I47H3V
CVE: NA

--------------------------------

commit ba12d20edc5caf9835006d8f3efd4ed18465c75b upstream

Backport summary: backport to kernel 4.19.57 for ICX perf topdown support

The Arch Perfmon v4 PMI handler is substantially different than the
older PMI handler. Instead of adding more and more ifs cleanly fork
the new handler into a new function, with the main common code
factored out into a common function.

Fix complaint from checkpatch.pl by removing "false" from "static bool
warned".

No functional change.

Based-on-code-from: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: acme@kernel.org
Link: http://lkml.kernel.org/r/1533712328-2834-1-git-send-email-kan.liang@linux.in...
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Yunying Sun <yunying.sun@intel.com>
Signed-off-by: Jackie Liu <liuyun01@kylinos.cn>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Wei Li <liwei391@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/x86/events/intel/core.c | 109 +++++++++++++++++++----------------
 1 file changed, 60 insertions(+), 49 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 28a0629c076f7..c74e675e432cf 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2308,59 +2308,15 @@ static void intel_pmu_reset(void)
 	local_irq_restore(flags);
 }
 
-/*
- * This handler is triggered by the local APIC, so the APIC IRQ handling
- * rules apply:
- */
-static int intel_pmu_handle_irq(struct pt_regs *regs)
+static int handle_pmi_common(struct pt_regs *regs, u64 status)
 {
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
-	int bit, loops;
-	u64 status;
-	int handled;
-	int pmu_enabled;
-
-	cpuc = this_cpu_ptr(&cpu_hw_events);
-
-	/*
-	 * Save the PMU state.
-	 * It needs to be restored when leaving the handler.
-	 */
-	pmu_enabled = cpuc->enabled;
-	/*
-	 * No known reason to not always do late ACK,
-	 * but just in case do it opt-in.
-	 */
-	if (!x86_pmu.late_ack)
-		apic_write(APIC_LVTPC, APIC_DM_NMI);
-	intel_bts_disable_local();
-	cpuc->enabled = 0;
-	__intel_pmu_disable_all();
-	handled = intel_pmu_drain_bts_buffer();
-	handled += intel_bts_interrupt();
-	status = intel_pmu_get_status();
-	if (!status)
-		goto done;
-
-	loops = 0;
-again:
-	intel_pmu_lbr_read();
-	intel_pmu_ack_status(status);
-	if (++loops > 100) {
-		static bool warned = false;
-		if (!warned) {
-			WARN(1, "perfevents: irq loop stuck!\n");
-			perf_event_print_debug();
-			warned = true;
-		}
-		intel_pmu_reset();
-		goto done;
-	}
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int bit;
+	int handled = 0;
 
 	inc_irq_stat(apic_perf_irqs);
 
-
 	/*
 	 * Ignore a range of extra bits in status that do not indicate
 	 * overflow by themselves.
@@ -2369,7 +2325,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 		    GLOBAL_STATUS_ASIF |
 		    GLOBAL_STATUS_LBRS_FROZEN);
 	if (!status)
-		goto done;
+		return 0;
 	/*
 	 * In case multiple PEBS events are sampled at the same time,
 	 * it is possible to have GLOBAL_STATUS bit 62 set indicating
@@ -2439,6 +2395,61 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 			x86_pmu_stop(event, 0);
 	}
 
+	return handled;
+}
+
+/*
+ * This handler is triggered by the local APIC, so the APIC IRQ handling
+ * rules apply:
+ */
+static int intel_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct cpu_hw_events *cpuc;
+	int loops;
+	u64 status;
+	int handled;
+	int pmu_enabled;
+
+	cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	/*
+	 * Save the PMU state.
+	 * It needs to be restored when leaving the handler.
+	 */
+	pmu_enabled = cpuc->enabled;
+	/*
+	 * No known reason to not always do late ACK,
+	 * but just in case do it opt-in.
+	 */
+	if (!x86_pmu.late_ack)
+		apic_write(APIC_LVTPC, APIC_DM_NMI);
+	intel_bts_disable_local();
+	cpuc->enabled = 0;
+	__intel_pmu_disable_all();
+	handled = intel_pmu_drain_bts_buffer();
+	handled += intel_bts_interrupt();
+	status = intel_pmu_get_status();
+	if (!status)
+		goto done;
+
+	loops = 0;
+again:
+	intel_pmu_lbr_read();
+	intel_pmu_ack_status(status);
+	if (++loops > 100) {
+		static bool warned;
+
+		if (!warned) {
+			WARN(1, "perfevents: irq loop stuck!\n");
+			perf_event_print_debug();
+			warned = true;
+		}
+		intel_pmu_reset();
+		goto done;
+	}
+
+	handled += handle_pmi_common(regs, status);
+
 	/*
 	 * Repeat if there is more work to be done:
 	 */
-- 
2.25.1
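
For readers following the change, the shape of the refactoring can be
sketched in plain userspace C. The sketch below is illustrative only,
not kernel code: pmu_get_status(), fake_status, and the printf bodies
are invented stand-ins; only the division of labour mirrors the patch,
with the ack-and-retry loop kept in the outer handler and the per-bit
counter servicing factored into handle_pmi_common().

/*
 * Standalone model of the split introduced by this patch.
 * Build with: gcc -o pmi-sketch pmi-sketch.c
 */
#include <stdio.h>
#include <stdint.h>

/* Simulated GLOBAL_STATUS source; a real PMU would raise new bits. */
static uint64_t fake_status = 0x5;

static uint64_t pmu_get_status(void)
{
	uint64_t s = fake_status;

	fake_status = 0;	/* pretend acking clears the bits */
	return s;
}

/*
 * Common code shared by every handler flavour: walk the set status
 * bits and service each overflowed counter, returning the number of
 * events handled (the role of handle_pmi_common() in the patch).
 */
static int handle_pmi_common(uint64_t status)
{
	int bit;
	int handled = 0;

	for (bit = 0; bit < 64; bit++) {
		if (!(status & (1ULL << bit)))
			continue;
		printf("servicing counter %d\n", bit);
		handled++;
	}
	return handled;
}

/*
 * Legacy-style outer handler: re-read status and retry, like the
 * again:/goto flow that stays in intel_pmu_handle_irq().
 */
static int pmu_handle_irq(void)
{
	uint64_t status;
	int loops = 0;
	int handled = 0;

	while ((status = pmu_get_status()) != 0) {
		if (++loops > 100) {
			fprintf(stderr, "irq loop stuck!\n");
			break;
		}
		handled += handle_pmi_common(status);
	}
	return handled;
}

int main(void)
{
	printf("handled %d events\n", pmu_handle_irq());
	return 0;
}

The point of the split, per the commit message, is that a later Arch
Perfmon v4 handler can call the same common helper with a status word
obtained its own way, rather than threading more v4-specific ifs
through one monolithic handler.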