
From: Cheng Yu <serein.chengyu@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAJEHU
CVE: NA

--------------------------------

When cpu hyperthreading is enabled, one physical core can virtualize
multiple logical cpus. Assume that physical core0 virtualizes two
logical cpus, cpu0 and cpu1. A task is migrated from cpu0 to cpu1 only
when the utilization of cpu0 exceeds the configured ratio of cpu0's
capacity; otherwise the task is not migrated and stays on cpu0.

External impacts:
1) default config: CONFIG_SCHED_KEEP_ON_CORE=y on arm64 (not set on x86)
2) sysctl: /proc/sys/kernel/sched_util_ratio
3) sched features: KEEP_ON_CORE (default NO_KEEP_ON_CORE)

Signed-off-by: Cheng Yu <serein.chengyu@huawei.com>
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 arch/arm64/configs/openeuler_defconfig |  1 +
 arch/x86/configs/openeuler_defconfig   |  1 +
 include/linux/sched/sysctl.h           |  4 +++
 init/Kconfig                           | 12 ++++++++
 kernel/sched/fair.c                    | 42 ++++++++++++++++++++++++++
 kernel/sched/features.h                |  4 +++
 6 files changed, 64 insertions(+)

diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index f712ec22001b..785cc95bada1 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -204,6 +204,7 @@ CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 CONFIG_NET_NS=y
 CONFIG_SCHED_STEAL=y
+CONFIG_SCHED_KEEP_ON_CORE=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_RELAY=y
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index faa192bca7a5..47249c96a5d1 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -222,6 +222,7 @@ CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 CONFIG_NET_NS=y
 CONFIG_SCHED_STEAL=y
+# CONFIG_SCHED_KEEP_ON_CORE is not set
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_RELAY=y
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 0aa95deb155e..e3a376ab4110 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -35,4 +35,8 @@ int sched_cluster_handler(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
+#ifdef CONFIG_SCHED_KEEP_ON_CORE
+extern int sysctl_sched_util_ratio;
+#endif
+
 #endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/init/Kconfig b/init/Kconfig
index 4c566c4bbfa4..dbc0becb8d8b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1425,6 +1425,18 @@ config SCHED_STEAL
 
 	  If unsure, say N here.
 
+config SCHED_KEEP_ON_CORE
+	bool "Prefer physical cores when migrating tasks"
+	depends on SCHED_SMT
+	default n
+	help
+	  When cpu hyperthreading is enabled, one physical core can virtualize
+	  multiple logical cpus. Assume that physical core0 virtualizes two
+	  logical cpus, cpu0 and cpu1. A task is migrated from cpu0 to cpu1
+	  only when the utilization of cpu0 exceeds the configured ratio
+	  (sysctl kernel.sched_util_ratio) of cpu0's capacity; otherwise the
+	  task stays on cpu0.
+
 config CHECKPOINT_RESTORE
 	bool "Checkpoint/restore support"
 	depends on PROC_FS
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c530d501bb48..7183b4c0a430 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -296,6 +296,16 @@ static struct ctl_table sched_fair_sysctls[] = {
 		.proc_handler	= proc_dointvec,
 	},
 #endif
+#ifdef CONFIG_SCHED_KEEP_ON_CORE
+	{
+		.procname	= "sched_util_ratio",
+		.data		= &sysctl_sched_util_ratio,
+		.maxlen		= sizeof(sysctl_sched_util_ratio),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+	},
+#endif
 #ifdef CONFIG_QOS_SCHED_SMART_GRID
 	{
 		.procname	= "smart_grid_strategy_ctrl",
@@ -8118,6 +8128,20 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 	return -1;
 }
 
+#ifdef CONFIG_SCHED_KEEP_ON_CORE
+int sysctl_sched_util_ratio = 100;
+
+static int core_has_spare(int cpu)
+{
+	int core_id = cpumask_first(cpu_smt_mask(cpu));
+	struct rq *rq = cpu_rq(core_id);
+	unsigned long util = rq->cfs.avg.util_avg;
+	unsigned long capacity = rq->cpu_capacity;
+
+	return util * 100 < capacity * sysctl_sched_util_ratio;
+}
+#endif
+
 #else /* CONFIG_SCHED_SMT */
 
 static inline void set_idle_cores(int cpu, int val)
@@ -9234,6 +9258,15 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 		/* Fast path */
 		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 	}
+
+#ifdef CONFIG_SCHED_KEEP_ON_CORE
+	if (sched_feat(KEEP_ON_CORE) &&
+	    static_branch_likely(&sched_smt_present)) {
+		if (core_has_spare(new_cpu))
+			new_cpu = cpumask_first(cpu_smt_mask((new_cpu)));
+	}
+#endif
+
 	rcu_read_unlock();
 	schedstat_end_time(cpu_rq(cpu), time);
 
@@ -10600,6 +10633,15 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	}
 #endif
 
+#ifdef CONFIG_SCHED_KEEP_ON_CORE
+	if (sched_feat(KEEP_ON_CORE) &&
+	    static_branch_likely(&sched_smt_present)) {
+		if (core_has_spare(env->dst_cpu) &&
+		    cpumask_first(cpu_smt_mask((env->dst_cpu))) != env->dst_cpu)
+			return 0;
+	}
+#endif
+
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) throttled_lb_pair, or
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 26b1a03bd3d2..71d101296865 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -61,6 +61,10 @@ SCHED_FEAT(SIS_UTIL, true)
 SCHED_FEAT(STEAL, false)
 #endif
 
+#ifdef CONFIG_SCHED_KEEP_ON_CORE
+SCHED_FEAT(KEEP_ON_CORE, false)
+#endif
+
 /*
  * Issue a WARN when we do multiple update_rq_clock() calls
  * in a single rq->lock section. Default disabled because the
-- 
2.39.2
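For reference, a minimal usage sketch, assuming a kernel built with
CONFIG_SCHED_KEEP_ON_CORE=y and this patch applied; the sched_features
file location depends on the kernel version and where debugfs is mounted
(older kernels expose /sys/kernel/debug/sched_features instead):

    # Enable the feature bit (the default is NO_KEEP_ON_CORE).
    echo KEEP_ON_CORE > /sys/kernel/debug/sched/features

    # Keep waking tasks on the core's first logical cpu while its
    # utilization stays below 80% of its capacity (default ratio: 100).
    echo 80 > /proc/sys/kernel/sched_util_ratio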