From: Yu Jiahua <yujiahua1@huawei.com>
hulk inclusion
category: feature
bugzilla: 175551 https://gitee.com/openeuler/kernel/issues/I4DVI3
--------------------------------
Add a sysctl switch to turn update_blocked_averages() on and off at runtime.
Disabling it helps reduce CPU usage when the system is under heavy load.
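A minimal user-space sketch of how the switch can be toggled (the path
assumes the kern_table entry added below, which is exposed under
/proc/sys/kernel/; writing it requires CAP_SYS_ADMIN):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* write "0" to skip blocked-average updates, "1" to re-enable them */
		int fd = open("/proc/sys/kernel/sched_blocked_averages", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "0", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}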
Signed-off-by: Yu Jiahua <yujiahua1@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
---
 include/linux/sched/sysctl.h |  7 +++++++
 kernel/sched/fair.c          | 40 ++++++++++++++++++++++++++++++++++++
 kernel/sysctl.c              | 11 ++++++++++
 3 files changed, 58 insertions(+)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 3c31ba88aca5..f07787335bf9 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -98,4 +98,11 @@ int sched_energy_aware_handler(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
+extern int sysctl_blocked_averages(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos);
+
+extern struct static_key_true sched_blocked_averages;
+#endif
+
 #endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7e8c23119867..c2597f5ddeea 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8001,6 +8001,39 @@ static void attach_tasks(struct lb_env *env)
 	rq_unlock(env->dst_rq, &rf);
 }
 
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
+DEFINE_STATIC_KEY_TRUE(sched_blocked_averages);
+
+static void set_blocked_averages(bool enabled)
+{
+	if (enabled)
+		static_branch_enable(&sched_blocked_averages);
+	else
+		static_branch_disable(&sched_blocked_averages);
+}
+
+int sysctl_blocked_averages(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct ctl_table t;
+	int err;
+	int state = static_branch_likely(&sched_blocked_averages);
+
+	if (write && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	t = *table;
+	t.data = &state;
+	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+	if (err < 0)
+		return err;
+	if (write)
+		set_blocked_averages(state);
+
+	return err;
+}
+#endif
+
 #ifdef CONFIG_NO_HZ_COMMON
 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
 {
@@ -8204,6 +8237,13 @@ static void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
+	if (!static_branch_unlikely(&sched_blocked_averages)) {
+		rq_unlock_irqrestore(rq, &rf);
+		return;
+	}
+#endif
+
 	decayed |= __update_blocked_others(rq, &done);
 	decayed |= __update_blocked_fair(rq, &done);
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d88c28573393..7c12f9f50ef5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1764,6 +1764,17 @@ static struct ctl_table kern_table[] = {
 	},
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_SCHED_DEBUG */
+#ifdef CONFIG_SCHED_OPTIMIZE_LOAD_TRACKING
+	{
+		.procname	= "sched_blocked_averages",
+		.data		= NULL,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sysctl_blocked_averages,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
+#endif
 	{
 		.procname	= "sched_rt_period_us",
 		.data		= &sysctl_sched_rt_period,