From: Dong Kai <dongkai11@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4F3V1
CVE: NA
--------------------------------
The softlockup and hardlockup detectors only check the status of the CPU they run on. If a CPU core suspends, neither of them works: no valid log is produced even though the CPU is already abnormal, which causes many problems for the system. To detect this case, add a corelockup detector.
First, whether a CPU core can respond to NMIs is used as the criterion for judging whether it is suspended. The rest is simple: each CPU core maintains its own NMI interrupt count and watches the nmi_counts of the next CPU core. If that count stops changing, the next core can no longer respond to NMIs normally, and we regard it as suspended.
To ensure robustness, the warning is only triggered after more than two consecutive missed NMIs.
The detection chain is as follows: cpu0->cpu1->...->cpuN->cpu0
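For illustration, here is a minimal userspace sketch of this detection chain (plain C, not the kernel implementation; the array-based counters, check_next_cpu() and the simulated rounds are illustrative assumptions — only the counter names and the "more than two misses" rule are taken from the patch):

  #include <stdio.h>
  #include <limits.h>

  #define NR_CPUS 4

  static unsigned long nmi_interrupts[NR_CPUS]; /* incremented by each CPU's NMI */
  static unsigned long nmi_cnt_saved[NR_CPUS];  /* last count seen of the watched CPU */
  static unsigned long nmi_cnt_missed[NR_CPUS]; /* consecutive unchanged observations */

  /* CPU 'cpu' checks its target, (cpu + 1) % NR_CPUS, mirroring cpu0->cpu1->...->cpu0 */
  static int check_next_cpu(int cpu)
  {
  	int target = (cpu + 1) % NR_CPUS;
  	unsigned long cur = nmi_interrupts[target];

  	if (nmi_cnt_saved[cpu] != cur) {
  		nmi_cnt_saved[cpu] = cur;   /* target made progress */
  		nmi_cnt_missed[cpu] = 0;
  		return 0;
  	}
  	/* only more than two consecutive misses count as a lockup */
  	return ++nmi_cnt_missed[cpu] > 2;
  }

  int main(void)
  {
  	int round, cpu;

  	/* start from ULONG_MAX so the first observation never counts as a miss */
  	for (cpu = 0; cpu < NR_CPUS; cpu++)
  		nmi_cnt_saved[cpu] = ULONG_MAX;

  	for (round = 0; round < 5; round++) {
  		for (cpu = 0; cpu < NR_CPUS; cpu++)
  			if (cpu != 2)               /* pretend cpu2 stopped taking NMIs */
  				nmi_interrupts[cpu]++;
  		for (cpu = 0; cpu < NR_CPUS; cpu++)
  			if (check_next_cpu(cpu))
  				printf("round %d: cpu%d sees core LOCKUP on cpu%d\n",
  				       round, cpu, (cpu + 1) % NR_CPUS);
  	}
  	return 0;
  }

As in the patch, the saved count starts at ULONG_MAX so that a delayed first NMI on the target is not misjudged, and cpu1 only reports cpu2 on the third consecutive unchanged reading.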
Signed-off-by: Dong Kai <dongkai11@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/nmi.h   |   6 ++
 kernel/watchdog.c     |  15 +++-
 kernel/watchdog_hld.c | 165 ++++++++++++++++++++++++++++++++++++++++++
 lib/Kconfig.debug     |   8 ++
 4 files changed, 192 insertions(+), 2 deletions(-)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 84f324d65068b..745d66c36e244 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -124,6 +124,12 @@ static inline int hardlockup_detector_perf_init(void) { return 0; } # endif #endif
+#ifdef CONFIG_CORELOCKUP_DETECTOR +extern void corelockup_detector_init(void); +extern void corelockup_detector_online_cpu(unsigned int cpu); +extern void corelockup_detector_offline_cpu(unsigned int cpu); +#endif + void watchdog_nmi_stop(void); void watchdog_nmi_start(void); int watchdog_nmi_probe(void); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 0dd17265dcbd4..8b54fd30a597f 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -551,15 +551,23 @@ static void softlockup_start_all(void)
int lockup_detector_online_cpu(unsigned int cpu) { - if (cpumask_test_cpu(cpu, &watchdog_allowed_mask)) + if (cpumask_test_cpu(cpu, &watchdog_allowed_mask)) { watchdog_enable(cpu); +#ifdef CONFIG_CORELOCKUP_DETECTOR + corelockup_detector_online_cpu(cpu); +#endif + } return 0; }
int lockup_detector_offline_cpu(unsigned int cpu) { - if (cpumask_test_cpu(cpu, &watchdog_allowed_mask)) + if (cpumask_test_cpu(cpu, &watchdog_allowed_mask)) { watchdog_disable(cpu); +#ifdef CONFIG_CORELOCKUP_DETECTOR + corelockup_detector_offline_cpu(cpu); +#endif + } return 0; }
@@ -783,4 +791,7 @@ void __init lockup_detector_init(void) if (!watchdog_nmi_probe()) nmi_watchdog_available = true; lockup_detector_setup(); +#ifdef CONFIG_CORELOCKUP_DETECTOR + corelockup_detector_init(); +#endif } diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 904a95262fcf6..e965c31958203 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -39,6 +39,163 @@ notrace void __weak arch_touch_nmi_watchdog(void) } EXPORT_SYMBOL(arch_touch_nmi_watchdog);
+#ifdef CONFIG_CORELOCKUP_DETECTOR +/* + * The softlockup and hardlockup detector only check the status + * of the cpu which it resides. If certain cpu core suspends, + * they are both not works. There is no any valid log but the + * cpu already abnormal and brings a lot of problems of system. + * To detect this case, we add the corelockup detector. + * + * First we use whether cpu core can responds to nmi as a sectence + * to determine if it is suspended. Then things is simple. Per cpu + * core maintains it's nmi interrupt counts and detector the + * nmi_counts of next cpu core. If the nmi interrupt counts not + * changed any more which means it can't respond nmi normally, we + * regard it as suspend. + * + * To ensure robustness, only consecutive lost nmi more than two + * times then trigger the warn. + * + * The detection chain is as following: + * cpu0->cpu1->...->cpuN->cpu0 + * + * detector_cpu: the target cpu to detector of current cpu + * nmi_interrupts: the nmi counts of current cpu + * nmi_cnt_saved: saved nmi counts of detector_cpu + * nmi_cnt_missed: the nmi consecutive miss counts of detector_cpu + */ +static DEFINE_PER_CPU(unsigned int, detector_cpu); +static DEFINE_PER_CPU(unsigned long, nmi_interrupts); +static DEFINE_PER_CPU(unsigned long, nmi_cnt_saved); +static DEFINE_PER_CPU(unsigned long, nmi_cnt_missed); +static DEFINE_PER_CPU(bool, core_watchdog_warn); + +static void watchdog_nmi_interrupts(void) +{ + __this_cpu_inc(nmi_interrupts); +} + +static void corelockup_status_copy(unsigned int from, unsigned int to) +{ + per_cpu(nmi_cnt_saved, to) = per_cpu(nmi_cnt_saved, from); + per_cpu(nmi_cnt_missed, to) = per_cpu(nmi_cnt_missed, from); + + /* always update detector cpu at the end */ + per_cpu(detector_cpu, to) = per_cpu(detector_cpu, from); +} + +static void corelockup_status_init(unsigned int cpu, unsigned int target) +{ + /* + * initialize saved count to max to avoid unnecessary misjudge + * caused by delay running of nmi on target cpu + */ + per_cpu(nmi_cnt_saved, cpu) = ULONG_MAX; + per_cpu(nmi_cnt_missed, cpu) = 0; + + /* always update detector cpu at the end */ + per_cpu(detector_cpu, cpu) = target; +} + +void __init corelockup_detector_init(void) +{ + unsigned int cpu, next; + + /* detector cpu is set to the next valid logically one */ + for_each_cpu_and(cpu, &watchdog_cpumask, cpu_online_mask) { + next = cpumask_next_and(cpu, &watchdog_cpumask, + cpu_online_mask); + if (next >= nr_cpu_ids) + next = cpumask_first_and(&watchdog_cpumask, + cpu_online_mask); + corelockup_status_init(cpu, next); + } +} + +/* + * Before: first->next + * After: first->[new]->next + */ +void corelockup_detector_online_cpu(unsigned int cpu) +{ + unsigned int first = cpumask_first_and(&watchdog_cpumask, + cpu_online_mask); + + if (WARN_ON(first >= nr_cpu_ids)) + return; + + /* cpu->next */ + corelockup_status_copy(first, cpu); + + /* first->cpu */ + corelockup_status_init(first, cpu); +} + +/* + * Before: prev->cpu->next + * After: prev->next + */ +void corelockup_detector_offline_cpu(unsigned int cpu) +{ + unsigned int prev = nr_cpu_ids; + unsigned int i; + + /* found prev cpu */ + for_each_cpu_and(i, &watchdog_cpumask, cpu_online_mask) { + if (per_cpu(detector_cpu, i) == cpu) { + prev = i; + break; + } + } + + if (WARN_ON(prev == nr_cpu_ids)) + return; + + /* prev->next */ + corelockup_status_copy(cpu, prev); +} + +static bool is_corelockup(unsigned int cpu) +{ + unsigned long nmi_int = per_cpu(nmi_interrupts, cpu); + + /* skip check if only one cpu online */ + if (cpu == smp_processor_id()) + return false; + + if (__this_cpu_read(nmi_cnt_saved) != nmi_int) { + __this_cpu_write(nmi_cnt_saved, nmi_int); + __this_cpu_write(nmi_cnt_missed, 0); + per_cpu(core_watchdog_warn, cpu) = false; + return false; + } + + __this_cpu_inc(nmi_cnt_missed); + if (__this_cpu_read(nmi_cnt_missed) > 2) + return true; + + return false; +} +NOKPROBE_SYMBOL(is_corelockup); + +static void watchdog_corelockup_check(struct pt_regs *regs) +{ + unsigned int cpu = __this_cpu_read(detector_cpu); + + if (is_corelockup(cpu)) { + if (per_cpu(core_watchdog_warn, cpu) == true) + return; + pr_emerg("Watchdog detected core LOCKUP on cpu %d\n", cpu); + + if (hardlockup_panic) + nmi_panic(regs, "Core LOCKUP"); + + per_cpu(core_watchdog_warn, cpu) = true; + } +} +#endif + #ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP static DEFINE_PER_CPU(ktime_t, last_timestamp); static DEFINE_PER_CPU(unsigned int, nmi_rearmed); @@ -106,6 +263,14 @@ static inline bool watchdog_check_timestamp(void)
void watchdog_hardlockup_check(struct pt_regs *regs) { +#ifdef CONFIG_CORELOCKUP_DETECTOR + /* Kick nmi interrupts */ + watchdog_nmi_interrupts(); + + /* corelockup check */ + watchdog_corelockup_check(regs); +#endif + if (__this_cpu_read(watchdog_nmi_touch) == true) { __this_cpu_write(watchdog_nmi_touch, false); return; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 0ee305de7d0ec..4a78bacd405bd 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -881,6 +881,14 @@ config HARDLOCKUP_DETECTOR chance to run. The current stack trace is displayed upon detection and the system will stay locked up.
+config CORELOCKUP_DETECTOR + bool "Detect Core Lockups" + depends on HARDLOCKUP_DETECTOR && SOFTLOCKUP_DETECTOR + depends on ARM64 + default n + help + Corelockups is used to check whether cpu core hungup or not. + config BOOTPARAM_HARDLOCKUP_PANIC bool "Panic (Reboot) On Hard Lockups" depends on HARDLOCKUP_DETECTOR
From: Dong Kai <dongkai11@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4F3V1
CVE: NA
--------------------------------
When PMU events are used as the NMI source, the PMU clock is disabled in wfi/wfe mode and the NMI cannot fire periodically. To minimize misjudgment caused by wfi/wfe, we adopt a simple method: disable wfi/wfe at the right time, using the watchdog hrtimer as the baseline.
The watchdog hrtimer is driven by the generic timer and runs at a higher frequency than the NMI. If the watchdog hrtimer stops working, we disable wfi/wfe mode; the PMU NMI should then always respond as long as the CPU core is not suspended.
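A rough userspace sketch of this gating idea (plain C, not the kernel code; update_wfx_gate() and the sampled counts are illustrative assumptions — only the close_wfi_wfe flag name and the stall rule come from the patch):

  #include <stdbool.h>
  #include <stdio.h>

  static unsigned long hrint_saved, hrint_missed;
  static bool close_wfi_wfe; /* when set, idle paths skip wfi/wfe */

  /* Called on each check; 'hrint' is the watched CPU's hrtimer interrupt count. */
  static void update_wfx_gate(unsigned long hrint)
  {
  	if (hrint_saved != hrint) {
  		hrint_saved = hrint;        /* hrtimer made progress */
  		hrint_missed = 0;
  		close_wfi_wfe = false;      /* safe to use wfi/wfe again */
  		return;
  	}
  	if (++hrint_missed > 2)
  		close_wfi_wfe = true;       /* hrtimer stalled: keep the PMU clock running */
  }

  int main(void)
  {
  	/* hrtimer count advances twice, then stalls */
  	unsigned long samples[] = { 1, 2, 2, 2, 2, 2 };

  	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
  		update_wfx_gate(samples[i]);
  		printf("check %u: close_wfi_wfe=%d\n", i, close_wfi_wfe);
  	}
  	return 0;
  }

The design choice is that wfi/wfe is only closed while an hrtimer appears stalled, so the cost of busy idling is paid just long enough for the PMU NMI to get a chance to fire and the chain check to run.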
Signed-off-by: Dong Kai <dongkai11@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/barrier.h | 15 ++++++++
 include/linux/nmi.h              |  2 +
 kernel/watchdog.c                | 12 ++++++
 kernel/watchdog_hld.c            | 63 ++++++++++++++++++++++++++++++++
 4 files changed, 92 insertions(+)
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 3cae78c1ce33b..519a30346e176 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -24,8 +24,23 @@ #define nops(n) asm volatile(__nops(n))
#define sev() asm volatile("sev" : : : "memory") +#ifdef CONFIG_CORELOCKUP_DETECTOR +extern unsigned int close_wfi_wfe; +#define wfe() \ + do { \ + if (likely(close_wfi_wfe == 0)) \ + asm volatile("wfe" : : : "memory"); \ + } while (0) +#define wfi() \ + do { \ + if (likely(close_wfi_wfe == 0)) \ + asm volatile("wfi" : : : "memory"); \ + } while (0) + +#else #define wfe() asm volatile("wfe" : : : "memory") #define wfi() asm volatile("wfi" : : : "memory") +#endif
#define isb() asm volatile("isb" : : : "memory") #define dmb(opt) asm volatile("dmb " #opt : : : "memory") diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 745d66c36e244..6cfb36e889fa4 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -128,6 +128,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; } extern void corelockup_detector_init(void); extern void corelockup_detector_online_cpu(unsigned int cpu); extern void corelockup_detector_offline_cpu(unsigned int cpu); +extern void watchdog_check_hrtimer(void); +extern unsigned long watchdog_hrtimer_interrupts(unsigned int cpu); #endif
void watchdog_nmi_stop(void); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 8b54fd30a597f..9cea1ef8b41bc 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -355,6 +355,13 @@ static int softlockup_fn(void *data) return 0; }
+#ifdef CONFIG_CORELOCKUP_DETECTOR +unsigned long watchdog_hrtimer_interrupts(unsigned int cpu) +{ + return per_cpu(hrtimer_interrupts, cpu); +} +#endif + /* watchdog kicker functions */ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) { @@ -366,6 +373,11 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) if (!watchdog_enabled) return HRTIMER_NORESTART;
+#ifdef CONFIG_CORELOCKUP_DETECTOR + /* check hrtimer of detector cpu */ + watchdog_check_hrtimer(); +#endif + /* kick the hardlockup detector */ watchdog_interrupt_count();
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index e965c31958203..58d7acec4269d 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -60,16 +60,37 @@ EXPORT_SYMBOL(arch_touch_nmi_watchdog); * The detection chain is as following: * cpu0->cpu1->...->cpuN->cpu0 * + * When using pmu events as nmi source, the pmu clock is disabled + * under wfi/wfe mode. And the nmi can't respond periodically. + * To minimize the misjudgment by wfi/wfe, we adopt a simple method + * which to disable wfi/wfe at the right time and the watchdog hrtimer + * is a good baseline. + * + * The watchdog hrtimer is based on generate timer and has high freq + * than nmi. If watchdog hrtimer not works we disable wfi/wfe mode + * then the pmu nmi should always responds as long as the cpu core + * not suspend. + * * detector_cpu: the target cpu to detector of current cpu * nmi_interrupts: the nmi counts of current cpu * nmi_cnt_saved: saved nmi counts of detector_cpu * nmi_cnt_missed: the nmi consecutive miss counts of detector_cpu + * hrint_saved: saved hrtimer interrupts of detector_cpu + * hrint_missed: the hrtimer consecutive miss counts of detector_cpu + * corelockup_cpumask/close_wfi_wfe: + * the cpu mask is set if certain cpu maybe fall in suspend and close + * wfi/wfe mode if any bit is set */ static DEFINE_PER_CPU(unsigned int, detector_cpu); static DEFINE_PER_CPU(unsigned long, nmi_interrupts); static DEFINE_PER_CPU(unsigned long, nmi_cnt_saved); static DEFINE_PER_CPU(unsigned long, nmi_cnt_missed); static DEFINE_PER_CPU(bool, core_watchdog_warn); +static DEFINE_PER_CPU(unsigned long, hrint_saved); +static DEFINE_PER_CPU(unsigned long, hrint_missed); +struct cpumask corelockup_cpumask __read_mostly; +unsigned int close_wfi_wfe; +static bool pmu_based_nmi;
static void watchdog_nmi_interrupts(void) { @@ -80,6 +101,8 @@ static void corelockup_status_copy(unsigned int from, unsigned int to) { per_cpu(nmi_cnt_saved, to) = per_cpu(nmi_cnt_saved, from); per_cpu(nmi_cnt_missed, to) = per_cpu(nmi_cnt_missed, from); + per_cpu(hrint_saved, to) = per_cpu(hrint_saved, from); + per_cpu(hrint_missed, to) = per_cpu(hrint_missed, from);
/* always update detector cpu at the end */ per_cpu(detector_cpu, to) = per_cpu(detector_cpu, from); @@ -93,6 +116,8 @@ static void corelockup_status_init(unsigned int cpu, unsigned int target) */ per_cpu(nmi_cnt_saved, cpu) = ULONG_MAX; per_cpu(nmi_cnt_missed, cpu) = 0; + per_cpu(hrint_saved, cpu) = ULONG_MAX; + per_cpu(hrint_missed, cpu) = 0;
/* always update detector cpu at the end */ per_cpu(detector_cpu, cpu) = target; @@ -113,6 +138,38 @@ void __init corelockup_detector_init(void) } }
+void watchdog_check_hrtimer(void) +{ + unsigned int cpu = __this_cpu_read(detector_cpu); + unsigned long hrint = watchdog_hrtimer_interrupts(cpu); + + /* + * The freq of hrtimer is fast than nmi interrupts and + * the core mustn't hangs if hrtimer still working. + * So update the nmi interrupts in hrtimer either to + * improved robustness of nmi counts check. + */ + watchdog_nmi_interrupts(); + + if (!pmu_based_nmi) + return; + + if (__this_cpu_read(hrint_saved) != hrint) { + __this_cpu_write(hrint_saved, hrint); + __this_cpu_write(hrint_missed, 0); + cpumask_clear_cpu(cpu, &corelockup_cpumask); + } else { + __this_cpu_inc(hrint_missed); + if (__this_cpu_read(hrint_missed) > 2) + cpumask_set_cpu(cpu, &corelockup_cpumask); + } + + if (likely(cpumask_empty(&corelockup_cpumask))) + close_wfi_wfe = 0; + else + close_wfi_wfe = 1; +} + /* * Before: first->next * After: first->[new]->next @@ -141,6 +198,9 @@ void corelockup_detector_offline_cpu(unsigned int cpu) unsigned int prev = nr_cpu_ids; unsigned int i;
+ /* clear bitmap */ + cpumask_clear_cpu(cpu, &corelockup_cpumask); + /* found prev cpu */ for_each_cpu_and(i, &watchdog_cpumask, cpu_online_mask) { if (per_cpu(detector_cpu, i) == cpu) { @@ -476,6 +536,9 @@ int __init hardlockup_detector_perf_init(void) perf_event_release_kernel(this_cpu_read(watchdog_ev)); this_cpu_write(watchdog_ev, NULL); } +#ifdef CONFIG_CORELOCKUP_DETECTOR + pmu_based_nmi = true; +#endif return ret; } #endif /* CONFIG_HARDLOCKUP_DETECTOR_PERF */
From: Dong Kai <dongkai11@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4F3V1
CVE: NA
--------------------------------
Add the cmdline parameter "enable_corelockup_detector" to allow enabling the core suspend detector. It is enabled by default as part of the Ascend features.
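In other words, the detector now stays off unless either the Ascend feature set turns it on or the parameter is passed manually, e.g. by appending enable_corelockup_detector to the kernel command line (the parameter name follows from the __setup() handler added in this patch).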
Signed-off-by: Dong Kai <dongkai11@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/mm/init.c  |  7 +++++++
 include/linux/nmi.h   |  1 +
 kernel/watchdog.c     | 12 ++++++++----
 kernel/watchdog_hld.c | 18 ++++++++++++++----
 4 files changed, 30 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 55fc6a0206796..8cdf92626c2c6 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -938,6 +938,9 @@ __setup("keepinitrd", keepinitrd_setup); #ifdef CONFIG_ASCEND_FEATURES
#include <linux/perf/arm_pmu.h> +#ifdef CONFIG_CORELOCKUP_DETECTOR +#include <linux/nmi.h> +#endif
void ascend_enable_all_features(void) { @@ -970,6 +973,10 @@ void ascend_enable_all_features(void) #ifdef CONFIG_ARM64_PSEUDO_NMI enable_pseudo_nmi = true; #endif + +#ifdef CONFIG_CORELOCKUP_DETECTOR + enable_corelockup_detector = true; +#endif }
static int __init ascend_enable_setup(char *__unused) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 6cfb36e889fa4..2acb9195947a9 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -130,6 +130,7 @@ extern void corelockup_detector_online_cpu(unsigned int cpu); extern void corelockup_detector_offline_cpu(unsigned int cpu); extern void watchdog_check_hrtimer(void); extern unsigned long watchdog_hrtimer_interrupts(unsigned int cpu); +extern bool enable_corelockup_detector; #endif
void watchdog_nmi_stop(void); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 9cea1ef8b41bc..463c4c11cecba 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -375,7 +375,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
#ifdef CONFIG_CORELOCKUP_DETECTOR /* check hrtimer of detector cpu */ - watchdog_check_hrtimer(); + if (enable_corelockup_detector) + watchdog_check_hrtimer(); #endif
/* kick the hardlockup detector */ @@ -566,7 +567,8 @@ int lockup_detector_online_cpu(unsigned int cpu) if (cpumask_test_cpu(cpu, &watchdog_allowed_mask)) { watchdog_enable(cpu); #ifdef CONFIG_CORELOCKUP_DETECTOR - corelockup_detector_online_cpu(cpu); + if (enable_corelockup_detector) + corelockup_detector_online_cpu(cpu); #endif } return 0; @@ -577,7 +579,8 @@ int lockup_detector_offline_cpu(unsigned int cpu) if (cpumask_test_cpu(cpu, &watchdog_allowed_mask)) { watchdog_disable(cpu); #ifdef CONFIG_CORELOCKUP_DETECTOR - corelockup_detector_offline_cpu(cpu); + if (enable_corelockup_detector) + corelockup_detector_offline_cpu(cpu); #endif } return 0; @@ -804,6 +807,7 @@ void __init lockup_detector_init(void) nmi_watchdog_available = true; lockup_detector_setup(); #ifdef CONFIG_CORELOCKUP_DETECTOR - corelockup_detector_init(); + if (enable_corelockup_detector) + corelockup_detector_init(); #endif } diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 58d7acec4269d..51ffc8f90520d 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -91,6 +91,14 @@ static DEFINE_PER_CPU(unsigned long, hrint_missed); struct cpumask corelockup_cpumask __read_mostly; unsigned int close_wfi_wfe; static bool pmu_based_nmi; +bool enable_corelockup_detector; + +static int __init enable_corelockup_detector_setup(char *str) +{ + enable_corelockup_detector = true; + return 1; +} +__setup("enable_corelockup_detector", enable_corelockup_detector_setup);
static void watchdog_nmi_interrupts(void) { @@ -324,11 +332,13 @@ static inline bool watchdog_check_timestamp(void) void watchdog_hardlockup_check(struct pt_regs *regs) { #ifdef CONFIG_CORELOCKUP_DETECTOR - /* Kick nmi interrupts */ - watchdog_nmi_interrupts(); + if (enable_corelockup_detector) { + /* Kick nmi interrupts */ + watchdog_nmi_interrupts();
- /* corelockup check */ - watchdog_corelockup_check(regs); + /* corelockup check */ + watchdog_corelockup_check(regs); + } #endif
if (__this_cpu_read(watchdog_nmi_touch) == true) {
From: Dong Kai <dongkai11@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4F3V1
CVE: NA
--------------------------------
Corelockup detection on arm64 is now complete, so enable it in the defconfig.
Signed-off-by: Dong Kai <dongkai11@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/configs/hulk_defconfig | 1 +
 1 file changed, 1 insertion(+)
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig index 605d17461383b..50cc7d95c9297 100644 --- a/arch/arm64/configs/hulk_defconfig +++ b/arch/arm64/configs/hulk_defconfig @@ -5540,6 +5540,7 @@ CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 CONFIG_SDEI_WATCHDOG=y # CONFIG_PMU_WATCHDOG is not set +CONFIG_CORELOCKUP_DETECTOR=y CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1
From: Xu Qiang <xuqiang36@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4F3V1
CVE: NA
--------------------------------
Optimize the core lockup detection judgment rules to make them easier to understand.
Core suspend detection is now performed in the hrtimer interrupt handler. The detection condition is that neither the hrtimer interrupt count nor the NMI interrupt count of the watched core is updated for multiple consecutive checks.
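A minimal userspace sketch of the reworked rule (plain C, not the kernel implementation; core_looks_locked() and the MISS_THRESH name are illustrative — the paired saved/missed counters and the "both stalled more than five times" condition mirror this patch):

  #include <stdbool.h>
  #include <stdio.h>

  #define MISS_THRESH 5 /* matches the hardcoded threshold in this patch */

  static unsigned long hrint_saved, hrint_missed;
  static unsigned long nmi_saved, nmi_missed;

  /* One check per hrtimer tick; hrint/nmi are the watched CPU's counters. */
  static bool core_looks_locked(unsigned long hrint, unsigned long nmi)
  {
  	if (hrint_saved != hrint) {
  		hrint_saved = hrint;
  		hrint_missed = 0;
  		return false;           /* hrtimer still updating */
  	}
  	hrint_missed++;

  	if (nmi_saved != nmi) {
  		nmi_saved = nmi;
  		nmi_missed = 0;
  		return false;           /* NMI still updating */
  	}
  	nmi_missed++;

  	return hrint_missed > MISS_THRESH && nmi_missed > MISS_THRESH;
  }

  int main(void)
  {
  	/* both counters frozen: only repeated checks past the threshold trip it */
  	for (int i = 0; i < 8; i++)
  		if (core_looks_locked(42, 7))
  			printf("check %d: core LOCKUP\n", i);
  	return 0;
  }

Requiring both counters to stall keeps a core that merely lost its NMI source (e.g. in wfi/wfe with the PMU clock gated) from being reported while its hrtimer is still ticking.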
Signed-off-by: Xu Qiang <xuqiang36@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/barrier.h | 15 ------
 kernel/watchdog_hld.c            | 91 +++++++++-----------------------
 2 files changed, 26 insertions(+), 80 deletions(-)
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 519a30346e176..3cae78c1ce33b 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -24,23 +24,8 @@ #define nops(n) asm volatile(__nops(n))
#define sev() asm volatile("sev" : : : "memory") -#ifdef CONFIG_CORELOCKUP_DETECTOR -extern unsigned int close_wfi_wfe; -#define wfe() \ - do { \ - if (likely(close_wfi_wfe == 0)) \ - asm volatile("wfe" : : : "memory"); \ - } while (0) -#define wfi() \ - do { \ - if (likely(close_wfi_wfe == 0)) \ - asm volatile("wfi" : : : "memory"); \ - } while (0) - -#else #define wfe() asm volatile("wfe" : : : "memory") #define wfi() asm volatile("wfi" : : : "memory") -#endif
#define isb() asm volatile("isb" : : : "memory") #define dmb(opt) asm volatile("dmb " #opt : : : "memory") diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 51ffc8f90520d..c71036cb474b3 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -77,7 +77,6 @@ EXPORT_SYMBOL(arch_touch_nmi_watchdog); * nmi_cnt_missed: the nmi consecutive miss counts of detector_cpu * hrint_saved: saved hrtimer interrupts of detector_cpu * hrint_missed: the hrtimer consecutive miss counts of detector_cpu - * corelockup_cpumask/close_wfi_wfe: * the cpu mask is set if certain cpu maybe fall in suspend and close * wfi/wfe mode if any bit is set */ @@ -85,12 +84,9 @@ static DEFINE_PER_CPU(unsigned int, detector_cpu); static DEFINE_PER_CPU(unsigned long, nmi_interrupts); static DEFINE_PER_CPU(unsigned long, nmi_cnt_saved); static DEFINE_PER_CPU(unsigned long, nmi_cnt_missed); -static DEFINE_PER_CPU(bool, core_watchdog_warn); static DEFINE_PER_CPU(unsigned long, hrint_saved); static DEFINE_PER_CPU(unsigned long, hrint_missed); -struct cpumask corelockup_cpumask __read_mostly; -unsigned int close_wfi_wfe; -static bool pmu_based_nmi; +static unsigned long corelockup_allcpu_dumped; bool enable_corelockup_detector;
static int __init enable_corelockup_detector_setup(char *str) @@ -150,6 +146,11 @@ void watchdog_check_hrtimer(void) { unsigned int cpu = __this_cpu_read(detector_cpu); unsigned long hrint = watchdog_hrtimer_interrupts(cpu); + unsigned long nmi_int = per_cpu(nmi_interrupts, cpu); + + /* skip check if only one cpu online */ + if (cpu == smp_processor_id()) + return;
/* * The freq of hrtimer is fast than nmi interrupts and @@ -159,23 +160,31 @@ void watchdog_check_hrtimer(void) */ watchdog_nmi_interrupts();
- if (!pmu_based_nmi) - return; - if (__this_cpu_read(hrint_saved) != hrint) { __this_cpu_write(hrint_saved, hrint); __this_cpu_write(hrint_missed, 0); - cpumask_clear_cpu(cpu, &corelockup_cpumask); - } else { - __this_cpu_inc(hrint_missed); - if (__this_cpu_read(hrint_missed) > 2) - cpumask_set_cpu(cpu, &corelockup_cpumask); + return; + } + __this_cpu_inc(hrint_missed); + + if (__this_cpu_read(nmi_cnt_saved) != nmi_int) { + __this_cpu_write(nmi_cnt_saved, nmi_int); + __this_cpu_write(nmi_cnt_missed, 0); + return; } + __this_cpu_inc(nmi_cnt_missed);
- if (likely(cpumask_empty(&corelockup_cpumask))) - close_wfi_wfe = 0; - else - close_wfi_wfe = 1; + if ((__this_cpu_read(hrint_missed) > 5) && (__this_cpu_read(nmi_cnt_missed) > 5)) { + pr_emerg("Watchdog detected core LOCKUP on cpu %d\n", cpu); + + if (!test_and_set_bit(0, &corelockup_allcpu_dumped)) { + trigger_allbutself_cpu_backtrace(); + panic("Core LOCKUP"); + } else { + while (1) + cpu_relax(); + } + } }
/* @@ -206,9 +215,6 @@ void corelockup_detector_offline_cpu(unsigned int cpu) unsigned int prev = nr_cpu_ids; unsigned int i;
- /* clear bitmap */ - cpumask_clear_cpu(cpu, &corelockup_cpumask); - /* found prev cpu */ for_each_cpu_and(i, &watchdog_cpumask, cpu_online_mask) { if (per_cpu(detector_cpu, i) == cpu) { @@ -223,45 +229,6 @@ void corelockup_detector_offline_cpu(unsigned int cpu) /* prev->next */ corelockup_status_copy(cpu, prev); } - -static bool is_corelockup(unsigned int cpu) -{ - unsigned long nmi_int = per_cpu(nmi_interrupts, cpu); - - /* skip check if only one cpu online */ - if (cpu == smp_processor_id()) - return false; - - if (__this_cpu_read(nmi_cnt_saved) != nmi_int) { - __this_cpu_write(nmi_cnt_saved, nmi_int); - __this_cpu_write(nmi_cnt_missed, 0); - per_cpu(core_watchdog_warn, cpu) = false; - return false; - } - - __this_cpu_inc(nmi_cnt_missed); - if (__this_cpu_read(nmi_cnt_missed) > 2) - return true; - - return false; -} -NOKPROBE_SYMBOL(is_corelockup); - -static void watchdog_corelockup_check(struct pt_regs *regs) -{ - unsigned int cpu = __this_cpu_read(detector_cpu); - - if (is_corelockup(cpu)) { - if (per_cpu(core_watchdog_warn, cpu) == true) - return; - pr_emerg("Watchdog detected core LOCKUP on cpu %d\n", cpu); - - if (hardlockup_panic) - nmi_panic(regs, "Core LOCKUP"); - - per_cpu(core_watchdog_warn, cpu) = true; - } -} #endif
#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP @@ -335,9 +302,6 @@ void watchdog_hardlockup_check(struct pt_regs *regs) if (enable_corelockup_detector) { /* Kick nmi interrupts */ watchdog_nmi_interrupts(); - - /* corelockup check */ - watchdog_corelockup_check(regs); } #endif
@@ -546,9 +510,6 @@ int __init hardlockup_detector_perf_init(void) perf_event_release_kernel(this_cpu_read(watchdog_ev)); this_cpu_write(watchdog_ev, NULL); } -#ifdef CONFIG_CORELOCKUP_DETECTOR - pmu_based_nmi = true; -#endif return ret; } #endif /* CONFIG_HARDLOCKUP_DETECTOR_PERF */
From: Xu Qiang <xuqiang36@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4F3V1
CVE: NA
--------------------------------
A user-mode interface is added to control the core lockup detection sensitivity.
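Since the entry is registered in kern_table as "corelockup_thresh", the knob should surface as /proc/sys/kernel/corelockup_thresh and accept values from 3 to 5 (per the extra1/extra2 bounds in the sysctl table below). A hypothetical userspace reader, for illustration:

  #include <stdio.h>

  int main(void)
  {
  	/* path follows from the "corelockup_thresh" entry added to kern_table */
  	FILE *f = fopen("/proc/sys/kernel/corelockup_thresh", "r");
  	int thresh;

  	if (f && fscanf(f, "%d", &thresh) == 1)
  		printf("corelockup miss threshold: %d (accepted range: 3..5)\n", thresh);
  	if (f)
  		fclose(f);
  	return 0;
  }

Writing it would follow the usual sysctl pattern, e.g. sysctl -w kernel.corelockup_thresh=3 for the most sensitive setting.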
Signed-off-by: Xu Qiang <xuqiang36@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/nmi.h   |  1 +
 kernel/sysctl.c       | 12 ++++++++++++
 kernel/watchdog_hld.c |  4 +++-
 3 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 2acb9195947a9..f2f3ee4a4458c 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -131,6 +131,7 @@ extern void corelockup_detector_offline_cpu(unsigned int cpu); extern void watchdog_check_hrtimer(void); extern unsigned long watchdog_hrtimer_interrupts(unsigned int cpu); extern bool enable_corelockup_detector; +extern int corelockup_miss_thresh; #endif
void watchdog_nmi_stop(void); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 3d8c79986575a..7d3d7ad953df2 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -129,6 +129,7 @@ static int __maybe_unused one = 1; static int __maybe_unused two = 2; static int __maybe_unused three = 3; static int __maybe_unused four = 4; +static int __maybe_unused five = 5; static unsigned long zero_ul; static unsigned long one_ul = 1; static unsigned long long_max = LONG_MAX; @@ -969,6 +970,17 @@ static struct ctl_table kern_table[] = { }, #endif /* CONFIG_SMP */ #endif +#ifdef CONFIG_CORELOCKUP_DETECTOR + { + .procname = "corelockup_thresh", + .data = &corelockup_miss_thresh, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &three, + .extra2 = &five, + }, +#endif #endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index c71036cb474b3..8a1bf476a96cb 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -88,6 +88,7 @@ static DEFINE_PER_CPU(unsigned long, hrint_saved); static DEFINE_PER_CPU(unsigned long, hrint_missed); static unsigned long corelockup_allcpu_dumped; bool enable_corelockup_detector; +int __read_mostly corelockup_miss_thresh = 5;
static int __init enable_corelockup_detector_setup(char *str) { @@ -174,7 +175,8 @@ void watchdog_check_hrtimer(void) } __this_cpu_inc(nmi_cnt_missed);
- if ((__this_cpu_read(hrint_missed) > 5) && (__this_cpu_read(nmi_cnt_missed) > 5)) { + if ((__this_cpu_read(hrint_missed) > corelockup_miss_thresh) + && (__this_cpu_read(nmi_cnt_missed) > corelockup_miss_thresh)) { pr_emerg("Watchdog detected core LOCKUP on cpu %d\n", cpu);
if (!test_and_set_bit(0, &corelockup_allcpu_dumped)) {
From: Xu Qiang <xuqiang36@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4F3V1
CVE: NA
--------------------------------
When hard lockup detection is disabled, core lockup detection is not performed.
Signed-off-by: Xu Qiang <xuqiang36@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/watchdog_hld.c | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 8a1bf476a96cb..43832b1023693 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -153,6 +153,10 @@ void watchdog_check_hrtimer(void) if (cpu == smp_processor_id()) return;
+ /* return if hard lockup detector is disable */ + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) + return; + /* * The freq of hrtimer is fast than nmi interrupts and * the core mustn't hangs if hrtimer still working.