[PATCH OLK-6.6 V1] sched/fair: Prefer physical cores when migrating tasks
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IAJEHU -------------------------------- When cpu hyperthreading is enabled, one physical core can virtualize multiple logical cpus. Assume that physical core0 virtualizes two logical cpus, cpu0 and cpu1. Only when the load of cpu0 exceeds the set ratio to the capacity of cpu0, the task will be migrated to the cpu1, otherwise the task will not be migrated and the cpu0 will still be used. External impacts: 1) default config: CONFIG_SCHED_KEEP_ON_CORE=y 2) sysctl: /proc/sys/kernel/sched_util_ratio 3) sched features: KEEP_ON_CORE (default NO_KEEP_ON_CORE) Signed-off-by: Cheng Yu <serein.chengyu@huawei.com> --- arch/arm64/Kconfig | 1 + arch/arm64/configs/openeuler_defconfig | 1 + init/Kconfig | 19 +++++++++++ kernel/sched/fair.c | 47 ++++++++++++++++++++++++++ kernel/sched/features.h | 4 +++ 5 files changed, 72 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index dec41d9fa054..8ad5692a460c 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -107,6 +107,7 @@ config ARM64 select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_SUPPORTS_SCHED_PARAL select ARCH_SUPPORTS_SCHED_SOFT_QUOTA + select ARCH_SUPPORTS_SCHED_KEEP_ON_CORE select ARCH_SUPPORTS_PAGE_TABLE_CHECK select ARCH_SUPPORTS_PER_VMA_LOCK select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index ce292ed39752..57dcf692d82e 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -211,6 +211,7 @@ CONFIG_NET_NS=y CONFIG_SCHED_STEAL=y CONFIG_SCHED_PARAL=y CONFIG_SCHED_SOFT_QUOTA=y +CONFIG_SCHED_KEEP_ON_CORE=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_RELAY=y diff --git a/init/Kconfig b/init/Kconfig index 2720083aaa17..2322830cb4ce 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1192,6 +1192,25 @@ config SCHED_SOFT_QUOTA restrictions on the use of the CPU quota, such as some 
commercial scenarios that charge based on the use of CPU quota. +# +# For architectures that want to enable the support for SCHED_KEEP_ON_CORE +# +config ARCH_SUPPORTS_SCHED_KEEP_ON_CORE + bool + +config SCHED_KEEP_ON_CORE + bool "Prefer physical cores when migrating tasks" + depends on ARCH_SUPPORTS_SCHED_KEEP_ON_CORE + depends on SCHED_SMT + default n + help + When cpu hyperthreading is enabled, one physical core can virtualize + multiple logical cpus. Assume that physical core0 virtualizes two + logical cpus, cpu0 and cpu1. Only when the load of cpu0 exceeds the + ratio to the capacity of cpu0, the task will be migrated to the cpu1, + otherwise the task will not be migrated and the cpu0 will still be + used. + config SCHED_MM_CID def_bool n depends on SMP && RSEQ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b22f3c072d20..d0ee00a018b0 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -231,6 +231,10 @@ static int sysctl_affinity_adjust_delay_ms = 5000; unsigned int sysctl_soft_runtime_ratio = 20; #endif +#ifdef CONFIG_SCHED_KEEP_ON_CORE +int sysctl_sched_util_ratio = 100; +#endif + #ifdef CONFIG_SYSCTL static struct ctl_table sched_fair_sysctls[] = { { @@ -341,6 +345,17 @@ static struct ctl_table sched_fair_sysctls[] = { .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_ONE_HUNDRED, }, +#endif +#ifdef CONFIG_SCHED_KEEP_ON_CORE + { + .procname = "sched_util_ratio", + .data = &sysctl_sched_util_ratio, + .maxlen = sizeof(sysctl_sched_util_ratio), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE_HUNDRED, + }, #endif {} }; @@ -8784,6 +8799,20 @@ static __always_inline unsigned long cpu_util_without(int cpu, struct task_struc return cpu_util(cpu, p, -1, 0); } +#ifdef CONFIG_SCHED_KEEP_ON_CORE +static bool core_has_spare(int cpu) +{ + int core_id = cpumask_first(cpu_smt_mask(cpu)); + unsigned long util = cpu_util_cfs(core_id); + unsigned long capacity = capacity_of(core_id); + + if 
(sysctl_sched_util_ratio == 100) + return true; + + return util * 100 < capacity * sysctl_sched_util_ratio; +} +#endif + /* * energy_env - Utilization landscape for energy estimation. * @task_busy_time: Utilization contribution by the task for which we test the @@ -9453,6 +9482,15 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) /* Fast path */ new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); } + +#ifdef CONFIG_SCHED_KEEP_ON_CORE + if (sched_feat(KEEP_ON_CORE) && + static_branch_likely(&sched_smt_present)) { + if (core_has_spare(new_cpu)) + new_cpu = cpumask_first(cpu_smt_mask((new_cpu))); + } +#endif + rcu_read_unlock(); schedstat_end_time(cpu_rq(cpu), time); @@ -10825,6 +10863,15 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) } #endif +#ifdef CONFIG_SCHED_KEEP_ON_CORE + if (sched_feat(KEEP_ON_CORE) && + static_branch_likely(&sched_smt_present)) { + if (core_has_spare(env->dst_cpu) && + cpumask_first(cpu_smt_mask((env->dst_cpu))) != env->dst_cpu) + return 0; + } +#endif + /* * We do not migrate tasks that are: * 1) throttled_lb_pair, or diff --git a/kernel/sched/features.h b/kernel/sched/features.h index b95797360dd6..1f135b0ea6fd 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -65,6 +65,10 @@ SCHED_FEAT(STEAL, false) SCHED_FEAT(PARAL, false) #endif +#ifdef CONFIG_SCHED_KEEP_ON_CORE +SCHED_FEAT(KEEP_ON_CORE, false) +#endif + /* * Issue a WARN when we do multiple update_rq_clock() calls * in a single rq->lock section. Default disabled because the -- 2.25.1
反馈: 您发送到kernel@openeuler.org的补丁/补丁集,已成功转换为PR! PR链接地址: https://gitee.com/openeuler/kernel/pulls/18676 邮件列表地址:https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/T2F... FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/18676 Mailing list address: https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/T2F...
participants (2)
- Cheng Yu
- patchwork bot