Dependencies:
CONFIG_DETECT_HUNG_TASK=y
CONFIG_WATCHDOG=y
Li Zhengyu (1):
  arm64: Add non nmi ipi backtrace support
 arch/arm64/Kconfig                     |  8 +++++
 arch/arm64/configs/openeuler_defconfig |  1 +
 arch/arm64/kernel/ipi_nmi.c            | 41 ++++++++++++++++++++++----
 3 files changed, 45 insertions(+), 5 deletions(-)
From: Li Zhengyu <lizhengyu3@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8PRY6
CVE: NA
--------------------------------
Use a non-NMI IPI to support backtrace on arm64 when NMI is not supported. It has been tested on QEMU.
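Editorial note (not part of the patch): because the fallback is delivered as an ordinary IPI, it can only capture CPUs that are still servicing interrupts; a CPU stuck with interrupts masked still needs a real or pseudo NMI to be dumped. A hypothetical illustration of a state the IRQ-based path cannot capture:

#include <linux/irqflags.h>
#include <linux/processor.h>

/*
 * Hypothetical illustration: a CPU spinning like this with interrupts
 * masked never handles the backtrace IPI sent by arm64_send_ipi(), so
 * the non-NMI fallback cannot dump its stack; only a (pseudo-)NMI can
 * still break in.
 */
static void stuck_with_irqs_off(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}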
Signed-off-by: Li Zhengyu <lizhengyu3@huawei.com>
Signed-off-by: Liao Chen <liaochen4@huawei.com>
---
 arch/arm64/Kconfig                     |  8 +++++
 arch/arm64/configs/openeuler_defconfig |  1 +
 arch/arm64/kernel/ipi_nmi.c            | 41 ++++++++++++++++++++++----
 3 files changed, 45 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d1705fdc046a..9568049f36e3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2242,6 +2242,14 @@ config ARM64_DEBUG_PRIORITY_MASKING
 	  If unsure, say N
 endif # ARM64_PSEUDO_NMI
 
+config NON_NMI_IPI_BACKTRACE
+	bool "Support non nmi ipi backtrace"
+	depends on ARM64_PSEUDO_NMI
+	default n
+	help
+	  This adds support for non nmi ipi backtrace, which allows hungtask_monitor
+	  to print stack trace when recovering from a hung task.
+
 config RELOCATABLE
 	bool "Build a relocatable kernel image" if EXPERT
 	select ARCH_HAS_RELR
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index a417275129a4..5a8e078acbfa 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -535,6 +535,7 @@ CONFIG_ARM64_SVE=y
 CONFIG_ARM64_SME=y
 CONFIG_ARM64_PSEUDO_NMI=y
 # CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set
+CONFIG_NON_NMI_IPI_BACKTRACE=y
 CONFIG_RELOCATABLE=y
 CONFIG_RANDOMIZE_BASE=y
 CONFIG_KASLR_SKIP_MEM_RANGE=y
diff --git a/arch/arm64/kernel/ipi_nmi.c b/arch/arm64/kernel/ipi_nmi.c
index 9249e8b100aa..cab500ca4cb8 100644
--- a/arch/arm64/kernel/ipi_nmi.c
+++ b/arch/arm64/kernel/ipi_nmi.c
@@ -33,14 +33,45 @@ void arm64_send_nmi(cpumask_t *mask)
 	__ipi_send_mask(ipi_nmi_desc, mask);
 }
 
-bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+#ifdef CONFIG_NON_NMI_IPI_BACKTRACE
+static void ipi_cpu_backtrace(void *info)
 {
-	if (!ipi_nmi_desc)
-		return false;
+	__printk_safe_enter();
+	nmi_cpu_backtrace(get_irq_regs());
+	__printk_safe_exit();
+}
+
+static DEFINE_PER_CPU(call_single_data_t, cpu_backtrace_csd) =
+	CSD_INIT(ipi_cpu_backtrace, NULL);
+
+static void arm64_send_ipi(cpumask_t *mask)
+{
+	call_single_data_t *csd;
+	int this_cpu = raw_smp_processor_id();
+	int cpu;
+	int ret;
+
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		csd = &per_cpu(cpu_backtrace_csd, cpu);
+		ret = smp_call_function_single_async(cpu, csd);
+		if (ret)
+			pr_info("Sending IPI failed to CPU %d\n", cpu);
+	}
+}
+#endif
 
-	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_send_nmi);
+bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+{
+	if (ipi_nmi_desc)
+		nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_send_nmi);
+#ifdef CONFIG_NON_NMI_IPI_BACKTRACE
+	else
+		nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_send_ipi);
+#endif
 
-	return true;
+	return false;
 }
 
 static irqreturn_t ipi_nmi_handler(int irq, void *data)
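A note on the per-CPU CSD pattern used in arm64_send_ipi(): smp_call_function_single_async() returns -EBUSY when the call_single_data for that CPU is still in flight from an earlier request, which is why each target CPU gets its own statically initialized descriptor rather than sharing one. A minimal, hypothetical sketch of the same pattern (names here are illustrative, not part of the patch):

#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/printk.h>

/* Hypothetical demo of the per-CPU csd + async IPI pattern. */
static void demo_ipi_func(void *info)
{
	/* Runs on the target CPU in hard-IRQ context. */
	pr_info("demo IPI on CPU %d\n", smp_processor_id());
}

static DEFINE_PER_CPU(call_single_data_t, demo_csd) =
	CSD_INIT(demo_ipi_func, NULL);

static void demo_kick_cpu(int cpu)
{
	int ret = smp_call_function_single_async(cpu, &per_cpu(demo_csd, cpu));

	/* -EBUSY: the previous request to this CPU has not completed yet. */
	if (ret)
		pr_warn("demo IPI to CPU %d failed: %d\n", cpu, ret);
}

As for the __printk_safe_enter()/__printk_safe_exit() pair around nmi_cpu_backtrace() in the patch: it makes printk defer console output from this context, which avoids deadlocking on console locks that the interrupted CPU may already hold.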