From: Yu Jiahua <yujiahua1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4QU5Z?from=project-issue
CVE: NA
--------------------------------
This patch reverts the IAS feature from the openEuler kernel.
This reverts commit 189fa7a4152f63f81734000c3fbae206844313fa.
Signed-off-by: Yu Jiahua <Yujiahua1@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 include/linux/kernel.h       |  2 +-
 include/linux/sched/sysctl.h |  2 +-
 init/Kconfig                 | 36 +++++++---------
 kernel/sched/fair.c          | 14 +++---
 kernel/sched/idle.c          | 12 +++---
 kernel/sysctl.c              | 82 ++++++++++++++++--------------------
 6 files changed, 67 insertions(+), 81 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index dace1d9dffa6..96696e7d832e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -557,7 +557,7 @@ extern int sysctl_panic_on_rcu_stall;
 extern int sysctl_panic_on_stackoverflow;
 
 extern bool crash_kexec_post_notifiers;
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 extern unsigned long poll_threshold_ns;
 #endif
 
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 271847c1b4d8..f9de0db47267 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -103,7 +103,7 @@ int sched_energy_aware_handler(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 extern int sysctl_blocked_averages(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp, loff_t *ppos);
 extern int sysctl_tick_update_load(struct ctl_table *table, int write,
diff --git a/init/Kconfig b/init/Kconfig
index 6d2f69cafe15..dbbf73085919 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -774,6 +774,22 @@ config GENERIC_SCHED_CLOCK
menu "Scheduler features"
+config SCHED_OPTIMIZE_LOAD_TRACKING + bool "Optimize scheduler load tracking" + default n + help + Optimize scheduler load tracking, when load balance is not important + in system, we close some load tracking in tick and enqueue or dequeue + task, in this way, we can save some unnecessary cpu overhead. + +config IAS_SMART_HALT_POLL + bool "Enable smart halt poll" + default n + help + Before entering the real idle, polling for a while. if the current + task is set TIF_NEED_RESCHED during the polling process, it will + immediately break from the polling loop. + config UCLAMP_TASK bool "Enable utilization clamping for RT/FAIR tasks" depends on CPU_FREQ_GOV_SCHEDUTIL @@ -823,26 +839,6 @@ config UCLAMP_BUCKETS_COUNT
If in doubt, use the default value.
-menu "Intelligent aware scheduler" - -config IAS_SMART_IDLE - bool "Enable smart idle" - default n - help - Before entering the real idle, polling for a while. if the current - task is set TIF_NEED_RESCHED during the polling process, it will - immediately break from the polling loop. - -config IAS_SMART_LOAD_TRACKING - bool "Enable smart load tracking" - default n - help - Optimize scheduler load tracking, when load balance is not important - in system, we close some load tracking in tick and enqueue or dequeue - task, in this way, we can save some unnecessary cpu overhead. - -endmenu - endmenu
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 879cdcb243d1..96e8b479c88c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -44,7 +44,7 @@ unsigned int sysctl_sched_latency = 6000000ULL;
 static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
 
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 #define LANTENCY_MIN 10
 #define LANTENCY_MAX 30
 unsigned int sysctl_load_tracking_latency = LANTENCY_MIN;
@@ -3848,7 +3848,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 {
 	u64 now = cfs_rq_clock_pelt(cfs_rq);
 	int decayed;
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 	u64 delta;
 #endif
@@ -3856,7 +3856,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	 * Track task load average for carrying it to new CPU after migrated, and
 	 * track group sched_entity load average for task_h_load calc in migration
 	 */
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 	delta = now - se->avg.last_update_time;
 	delta >>= sysctl_load_tracking_latency;
@@ -4681,7 +4681,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	cfs_rq->curr = NULL;
 }
 
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 DEFINE_STATIC_KEY_TRUE(sched_tick_update_load);
 static void set_tick_update_load(bool enabled)
 {
@@ -4724,7 +4724,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	/*
 	 * Ensure that runnable average is periodically updated.
 	 */
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 	if (static_branch_likely(&sched_tick_update_load)) {
 		update_load_avg(cfs_rq, curr, UPDATE_TG);
 		update_cfs_group(curr);
@@ -8339,7 +8339,7 @@ static void attach_tasks(struct lb_env *env)
 
 	rq_unlock(env->dst_rq, &rf);
 }
 
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 DEFINE_STATIC_KEY_TRUE(sched_blocked_averages);
 
 static void set_blocked_averages(bool enabled)
@@ -8575,7 +8575,7 @@ static void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
 	if (!static_branch_likely(&sched_blocked_averages)) {
 		rq_unlock_irqrestore(rq, &rf);
 		return;
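
The fair.c hunks above only rename the guarding Kconfig symbol; the pattern they guard is a default-true static key that the sched_tick_update_load and sched_blocked_averages sysctls flip, so the hot paths pay a single patched branch. A minimal sketch of that pattern with hypothetical example_* names (the real helpers are set_tick_update_load() and set_blocked_averages(), whose bodies are not part of these hunks):

/* Illustrative sketch only -- not part of this patch. */
DEFINE_STATIC_KEY_TRUE(example_load_tracking_key);

/* Toggle helper in the style of set_tick_update_load()/set_blocked_averages(). */
static void example_set_load_tracking(bool enabled)
{
	if (enabled)
		static_branch_enable(&example_load_tracking_key);
	else
		static_branch_disable(&example_load_tracking_key);
}

/* Callers skip the PELT update cheaply when the key has been disabled. */
static void example_tick_update(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	if (static_branch_likely(&example_load_tracking_key)) {
		update_load_avg(cfs_rq, curr, UPDATE_TG);
		update_cfs_group(curr);
	}
}
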
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 66ccc6d38853..0ee3e7f08ed3 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -13,7 +13,7 @@
 /* Linker adds these: start and end of __cpuidle functions */
 extern char __cpuidle_text_start[], __cpuidle_text_end[];
 
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 /*
  * Poll_threshold_ns indicates the maximum polling time before
  * entering real idle.
@@ -60,7 +60,7 @@ static int __init cpu_idle_nopoll_setup(char *__unused)
 __setup("hlt", cpu_idle_nopoll_setup);
 #endif
 
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 static void smart_idle_poll(void)
 {
 	unsigned long poll_duration = poll_threshold_ns;
@@ -86,7 +86,7 @@ static noinline int __cpuidle cpu_idle_poll(void)
 	stop_critical_timings();
 	rcu_idle_enter();
 	local_irq_enable();
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 	smart_idle_poll();
 #endif
 
@@ -292,7 +292,7 @@ static void cpuidle_idle_call(void)
 static void do_idle(void)
 {
 	int cpu = smp_processor_id();
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 	unsigned long idle_poll_flag = poll_threshold_ns;
 #endif
 	/*
@@ -327,7 +327,7 @@ static void do_idle(void)
 		 * broadcast device expired for us, we don't want to go deep
 		 * idle as we know that the IPI is going to arrive right away.
 		 */
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 		if (cpu_idle_force_poll || tick_check_broadcast_expired() ||
 				idle_poll_flag) {
 #else
@@ -335,7 +335,7 @@ static void do_idle(void)
 #endif
 			tick_nohz_idle_restart_tick();
 			cpu_idle_poll();
-#ifdef CONFIG_IAS_SMART_IDLE
+#ifdef CONFIG_IAS_SMART_HALT_POLL
 			idle_poll_flag = 0;
 #endif
 		} else {
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 90d9aff1bdb1..1b640c8c147b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1659,46 +1659,6 @@ int proc_do_static_key(struct ctl_table *table, int write,
 	mutex_unlock(&static_key_mutex);
 	return ret;
 }
-static struct ctl_table ias_table[] = {
-#ifdef CONFIG_IAS_SMART_IDLE
-	{
-		.procname = "smart_idle_threshold",
-		.data = &poll_threshold_ns,
-		.maxlen = sizeof(unsigned long),
-		.mode = 0644,
-		.proc_handler = proc_doulongvec_minmax,
-	},
-#endif
-
-#ifdef CONFIG_IAS_SMART_LOAD_TRACKING
-	{
-		.procname = "sched_blocked_averages",
-		.data = NULL,
-		.maxlen = sizeof(unsigned int),
-		.mode = 0644,
-		.proc_handler = sysctl_blocked_averages,
-		.extra1 = SYSCTL_ZERO,
-		.extra2 = SYSCTL_ONE,
-	},
-	{
-		.procname = "sched_tick_update_load",
-		.data = NULL,
-		.maxlen = sizeof(unsigned int),
-		.mode = 0644,
-		.proc_handler = sysctl_tick_update_load,
-		.extra1 = SYSCTL_ZERO,
-		.extra2 = SYSCTL_ONE,
-	},
-	{
-		.procname = "sched_load_tracking_latency",
-		.data = NULL,
-		.maxlen = sizeof(unsigned int),
-		.mode = 0644,
-		.proc_handler = sysctl_update_load_latency,
-	},
-#endif
-	{ }
-};
 
 static struct ctl_table kern_table[] = {
 	{
@@ -1813,6 +1773,33 @@ static struct ctl_table kern_table[] = {
 	},
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_SCHED_DEBUG */
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
+	{
+		.procname = "sched_blocked_averages",
+		.data = NULL,
+		.maxlen = sizeof(unsigned int),
+		.mode = 0644,
+		.proc_handler = sysctl_blocked_averages,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_ONE,
+	},
+	{
+		.procname = "sched_tick_update_load",
+		.data = NULL,
+		.maxlen = sizeof(unsigned int),
+		.mode = 0644,
+		.proc_handler = sysctl_tick_update_load,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_ONE,
+	},
+	{
+		.procname = "sched_load_tracking_latency",
+		.data = NULL,
+		.maxlen = sizeof(unsigned int),
+		.mode = 0644,
+		.proc_handler = sysctl_update_load_latency,
+	},
+#endif
 	{
 		.procname = "sched_rt_period_us",
 		.data = &sysctl_sched_rt_period,
@@ -1871,7 +1858,15 @@ static struct ctl_table kern_table[] = {
 		.proc_handler = sysctl_sched_uclamp_handler,
 	},
 #endif
-
+#ifdef CONFIG_IAS_SMART_HALT_POLL
+	{
+		.procname = "halt_poll_threshold",
+		.data = &poll_threshold_ns,
+		.maxlen = sizeof(unsigned long),
+		.mode = 0644,
+		.proc_handler = proc_doulongvec_minmax,
+	},
+#endif
 #ifdef CONFIG_SCHED_AUTOGROUP
 	{
 		.procname = "sched_autogroup_enabled",
@@ -2713,11 +2708,6 @@ static struct ctl_table kern_table[] = {
 		.extra2 = SYSCTL_ONE,
 	},
 #endif
-	{
-		.procname = "ias",
-		.mode = 0555,
-		.child = ias_table,
-	},
 #ifdef CONFIG_QOS_SCHED
 	{
 		.procname = "qos_overload_detect_period_ms",
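
With the entries moved into kern_table, the knobs are exposed directly under /proc/sys/kernel/ (halt_poll_threshold, sched_blocked_averages, sched_tick_update_load, sched_load_tracking_latency) rather than under the removed /proc/sys/kernel/ias/ directory. A small userspace sketch of tuning the poll threshold; the 200000 ns value is an arbitrary example, not a recommendation:

#include <stdio.h>

int main(void)
{
	/* Relocated from /proc/sys/kernel/ias/smart_idle_threshold by this change. */
	FILE *f = fopen("/proc/sys/kernel/halt_poll_threshold", "w");

	if (!f) {
		perror("halt_poll_threshold");
		return 1;
	}
	/* Poll for at most ~200us before entering real idle. */
	fprintf(f, "%lu\n", 200000UL);
	fclose(f);
	return 0;
}
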