From: Li Hua <hucool.lihua@huawei.com>
hulk inclusion
category: feature
bugzilla: 176961 https://gitee.com/openeuler/kernel/issues/I4E05T
-------------------
Add IAS_SMART_HALT_POLL config for smart halt polling feature
Signed-off-by: Li Hua <hucool.lihua@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
---
 include/linux/kernel.h |  2 ++
 init/Kconfig           |  8 ++++++++
 kernel/sched/idle.c    | 14 ++++++++++++++
 kernel/sysctl.c        |  2 ++
 4 files changed, 26 insertions(+)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 63555b947d96..b8cce99fd8eb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -555,7 +555,9 @@ extern int sysctl_panic_on_rcu_stall;
 extern int sysctl_panic_on_stackoverflow;
 
 extern bool crash_kexec_post_notifiers;
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 extern unsigned long poll_threshold_ns;
+#endif
 
 /*
diff --git a/init/Kconfig b/init/Kconfig
index 66d7f2708b95..cedc5ab247a5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -774,6 +774,14 @@ config GENERIC_SCHED_CLOCK
 
 menu "Scheduler features"
 
+config IAS_SMART_HALT_POLL
+	bool "Enable smart halt poll"
+	default n
+	help
+	  Poll for a while before entering real idle. If TIF_NEED_RESCHED
+	  is set on the current task during the polling period, break out
+	  of the polling loop immediately.
+
 config UCLAMP_TASK
 	bool "Enable utilization clamping for RT/FAIR tasks"
 	depends on CPU_FREQ_GOV_SCHEDUTIL
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 0bd53051ac71..4f7b0ee06144 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -13,11 +13,13 @@
 /* Linker adds these: start and end of __cpuidle functions */
 extern char __cpuidle_text_start[], __cpuidle_text_end[];
 
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 /*
  * Poll_threshold_ns indicates the maximum polling time before
  * entering real idle.
  */
 unsigned long poll_threshold_ns;
+#endif
 
 /**
  * sched_idle_set_state - Record idle state for the current CPU.
@@ -58,6 +60,7 @@ static int __init cpu_idle_nopoll_setup(char *__unused)
 __setup("hlt", cpu_idle_nopoll_setup);
 #endif
 
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 static void smart_idle_poll(void)
 {
 	unsigned long poll_duration = poll_threshold_ns;
@@ -75,6 +78,7 @@ static void smart_idle_poll(void)
 		cur = ktime_get();
 	} while (ktime_before(cur, stop));
 }
+#endif
 
 static noinline int __cpuidle cpu_idle_poll(void)
 {
@@ -82,7 +86,9 @@ static noinline int __cpuidle cpu_idle_poll(void)
 	stop_critical_timings();
 	rcu_idle_enter();
 	local_irq_enable();
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 	smart_idle_poll();
+#endif
 
 	while (!tif_need_resched() &&
 	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
@@ -286,7 +292,9 @@ static void cpuidle_idle_call(void)
 static void do_idle(void)
 {
 	int cpu = smp_processor_id();
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 	unsigned long idle_poll_flag = poll_threshold_ns;
+#endif
 	/*
 	 * If the arch has a polling bit, we maintain an invariant:
 	 *
@@ -319,11 +327,17 @@ static void do_idle(void)
 		 * broadcast device expired for us, we don't want to go deep
 		 * idle as we know that the IPI is going to arrive right away.
 		 */
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 		if (cpu_idle_force_poll || tick_check_broadcast_expired() ||
 		    idle_poll_flag) {
+#else
+		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
+#endif
 			tick_nohz_idle_restart_tick();
 			cpu_idle_poll();
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 			idle_poll_flag = 0;
+#endif
 		} else {
 			cpuidle_idle_call();
 		}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 261388afe482..a573817a6fe0 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1849,6 +1849,7 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= sysctl_sched_uclamp_handler,
 	},
 #endif
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 	{
 		.procname	= "halt_poll_threshold",
 		.data		= &poll_threshold_ns,
@@ -1856,6 +1857,7 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 	},
+#endif
 #ifdef CONFIG_SCHED_AUTOGROUP
 	{
 		.procname	= "sched_autogroup_enabled",
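
For context, a minimal sketch of how smart_idle_poll() reads with
CONFIG_IAS_SMART_HALT_POLL=y, reconstructed from the hunk context above.
Only poll_duration, cur = ktime_get() and the ktime_before() loop condition
are visible in the diff; the cpu_relax() busy-wait, the tif_need_resched()
early exit, and the zero-threshold bail-out are assumptions based on the
Kconfig help text and on how idle_poll_flag is used in do_idle(), not a
copy of the original implementation:

	/*
	 * Sketch only: spin for at most poll_threshold_ns before entering
	 * real idle, leaving early if a reschedule becomes pending.
	 */
	static void smart_idle_poll(void)
	{
		unsigned long poll_duration = poll_threshold_ns;
		ktime_t cur, stop;

		if (!poll_duration)		/* assumed: 0 means no polling */
			return;

		stop = ktime_add_ns(ktime_get(), poll_duration);
		do {
			cpu_relax();		/* assumed busy-wait primitive */
			if (tif_need_resched())	/* break out per Kconfig help */
				break;
			cur = ktime_get();
		} while (ktime_before(cur, stop));	/* from hunk context */
	}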
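With the option enabled, the poll window is tunable at runtime through the
sysctl registered above, in nanoseconds; since poll_threshold_ns defaults
to 0 and do_idle() only forces the polling path when idle_poll_flag is
non-zero, the feature stays inert until a value is written. For example
(200000 is purely an illustrative value, not a recommendation):

	echo 200000 > /proc/sys/kernel/halt_poll_threshold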