From: Yu Jiahua <yujiahua1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4QU5Z?from=project-issue
CVE: NA
--------------------------------
This patch reverts the IAS feature from the openEuler kernel.
This reverts commit ad7ae387c7ce6fe6ab4dbfe2e667f0d792fb81ca.
Signed-off-by: Yu Jiahua <yujiahua1@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 include/linux/kernel.h |  2 --
 init/Kconfig           |  8 --------
 kernel/sched/idle.c    | 14 --------------
 kernel/sysctl.c        |  2 --
 4 files changed, 26 deletions(-)
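Note that the hunks below drop only the CONFIG_IAS_SMART_HALT_POLL guards; smart_idle_poll() itself and the poll_threshold_ns knob stay in the tree and are now compiled unconditionally. For reference, a rough sketch of the polling helper being un-gated, reconstructed from the fragments visible in the idle.c hunks (the cpu_relax() call and the early return on a zero threshold are assumptions, not confirmed by this diff):

static void smart_idle_poll(void)
{
	unsigned long poll_duration = poll_threshold_ns;
	ktime_t cur, stop;

	if (!poll_duration)	/* assumed: 0 disables the pre-idle poll */
		return;

	stop = ktime_add_ns(ktime_get(), poll_duration);
	do {
		cpu_relax();	/* assumed busy-wait primitive */
		if (tif_need_resched())	/* break out as soon as work arrives */
			break;
		cur = ktime_get();
	} while (ktime_before(cur, stop));
}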
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 96696e7d832e..8242af2f6065 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -557,9 +557,7 @@ extern int sysctl_panic_on_rcu_stall;
 extern int sysctl_panic_on_stackoverflow;
 
 extern bool crash_kexec_post_notifiers;
-#ifdef CONFIG_IAS_SMART_HALT_POLL
 extern unsigned long poll_threshold_ns;
-#endif
 
 /*
diff --git a/init/Kconfig b/init/Kconfig
index d2a0ac2e12db..f362f6fcc1d0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -774,14 +774,6 @@ config GENERIC_SCHED_CLOCK
menu "Scheduler features"
-config IAS_SMART_HALT_POLL
-	bool "Enable smart halt poll"
-	default n
-	help
-	  Before entering the real idle, polling for a while. if the current
-	  task is set TIF_NEED_RESCHED during the polling process, it will
-	  immediately break from the polling loop.
-
 config UCLAMP_TASK
 	bool "Enable utilization clamping for RT/FAIR tasks"
 	depends on CPU_FREQ_GOV_SCHEDUTIL
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 0ee3e7f08ed3..ebe6c8131309 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -13,13 +13,11 @@
 /* Linker adds these: start and end of __cpuidle functions */
 extern char __cpuidle_text_start[], __cpuidle_text_end[];
 
-#ifdef CONFIG_IAS_SMART_HALT_POLL
 /*
  * Poll_threshold_ns indicates the maximum polling time before
  * entering real idle.
  */
 unsigned long poll_threshold_ns;
-#endif
 
 /**
  * sched_idle_set_state - Record idle state for the current CPU.
@@ -60,7 +58,6 @@ static int __init cpu_idle_nopoll_setup(char *__unused)
 __setup("hlt", cpu_idle_nopoll_setup);
 #endif
 
-#ifdef CONFIG_IAS_SMART_HALT_POLL
 static void smart_idle_poll(void)
 {
 	unsigned long poll_duration = poll_threshold_ns;
@@ -78,7 +75,6 @@ static void smart_idle_poll(void)
 		cur = ktime_get();
 	} while (ktime_before(cur, stop));
 }
-#endif
 
 static noinline int __cpuidle cpu_idle_poll(void)
 {
@@ -86,9 +82,7 @@ static noinline int __cpuidle cpu_idle_poll(void)
 	stop_critical_timings();
 	rcu_idle_enter();
 	local_irq_enable();
-#ifdef CONFIG_IAS_SMART_HALT_POLL
 	smart_idle_poll();
-#endif
 
 	while (!tif_need_resched() &&
 	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
@@ -292,9 +286,7 @@ static void cpuidle_idle_call(void)
 static void do_idle(void)
 {
 	int cpu = smp_processor_id();
-#ifdef CONFIG_IAS_SMART_HALT_POLL
 	unsigned long idle_poll_flag = poll_threshold_ns;
-#endif
 	/*
 	 * If the arch has a polling bit, we maintain an invariant:
 	 *
@@ -327,17 +319,11 @@ static void do_idle(void)
 		 * broadcast device expired for us, we don't want to go deep
 		 * idle as we know that the IPI is going to arrive right away.
 		 */
-#ifdef CONFIG_IAS_SMART_HALT_POLL
 		if (cpu_idle_force_poll || tick_check_broadcast_expired()
 				|| idle_poll_flag) {
-#else
-		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
-#endif
 			tick_nohz_idle_restart_tick();
 			cpu_idle_poll();
-#ifdef CONFIG_IAS_SMART_HALT_POLL
 			idle_poll_flag = 0;
-#endif
 		} else {
 			cpuidle_idle_call();
 		}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1b640c8c147b..571b3eb6dfe0 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1858,7 +1858,6 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= sysctl_sched_uclamp_handler,
 	},
 #endif
-#ifdef CONFIG_IAS_SMART_HALT_POLL
 	{
 		.procname	= "halt_poll_threshold",
 		.data		= &poll_threshold_ns,
@@ -1866,7 +1865,6 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 	},
-#endif
 #ifdef CONFIG_SCHED_AUTOGROUP
 	{
 		.procname	= "sched_autogroup_enabled",
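
With the #ifdef removed from kern_table, the halt_poll_threshold sysctl is registered unconditionally, so the polling window can still be tuned at runtime through procfs. A hypothetical usage example (the value is illustrative only; the unit is nanoseconds, matching poll_threshold_ns, and 0 is expected to disable the pre-idle poll):

	# allow up to 200us of polling before entering real idle
	echo 200000 > /proc/sys/kernel/halt_poll_threshold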