
From: Cheng Yu <serein.chengyu@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IBWWUI
CVE: NA

--------------------------------

The optimizations are as follows:

1. Use a more reasonable way of obtaining the load value: read it via
   cpu_util_cfs() instead of rq->cfs.avg.util_avg.
2. Limit the maximum value of sysctl_sched_util_ratio to 100.
3. If sysctl_sched_util_ratio is 100, the other SMT sibling of the core
   will not be used.

Signed-off-by: Cheng Yu <serein.chengyu@huawei.com>
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 arch/arm64/Kconfig                   | 1 +
 arch/x86/configs/openeuler_defconfig | 1 -
 init/Kconfig                         | 7 +++++++
 kernel/sched/fair.c                  | 8 ++++++--
 4 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7cbdd0b6259e..3c775f7e4358 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -101,6 +101,7 @@ config ARM64
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select ARCH_SUPPORTS_NUMA_BALANCING
+	select ARCH_SUPPORTS_SCHED_KEEP_ON_CORE
 	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
 	select ARCH_SUPPORTS_PER_VMA_LOCK
 	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 47249c96a5d1..faa192bca7a5 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -222,7 +222,6 @@ CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 CONFIG_NET_NS=y
 CONFIG_SCHED_STEAL=y
-# CONFIG_SCHED_KEEP_ON_CORE is not set
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_RELAY=y
diff --git a/init/Kconfig b/init/Kconfig
index dbc0becb8d8b..4ef5f094b0ac 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1425,8 +1425,15 @@ config SCHED_STEAL
 
 	  If unsure, say N here.
 
+#
+# For architectures that want to enable the support for SCHED_KEEP_ON_CORE
+#
+config ARCH_SUPPORTS_SCHED_KEEP_ON_CORE
+	bool
+
 config SCHED_KEEP_ON_CORE
 	bool "Prefer physical cores when migrating tasks"
+	depends on ARCH_SUPPORTS_SCHED_KEEP_ON_CORE
 	depends on SCHED_SMT
 	default n
 	help
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7183b4c0a430..b5441e3f993a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -304,6 +304,7 @@ static struct ctl_table sched_fair_sysctls[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE_HUNDRED,
 	},
 #endif
 #ifdef CONFIG_QOS_SCHED_SMART_GRID
@@ -8131,13 +8132,16 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 #ifdef CONFIG_SCHED_KEEP_ON_CORE
 int sysctl_sched_util_ratio = 100;
 
-static int core_has_spare(int cpu)
+static bool core_has_spare(int cpu)
 {
 	int core_id = cpumask_first(cpu_smt_mask(cpu));
 	struct rq *rq = cpu_rq(core_id);
-	unsigned long util = rq->cfs.avg.util_avg;
+	unsigned long util = cpu_util_cfs(cpu);
 	unsigned long capacity = rq->cpu_capacity;
 
+	if (sysctl_sched_util_ratio == 100)
+		return true;
+
 	return util * 100 < capacity * sysctl_sched_util_ratio;
 }
 #endif
-- 
2.39.2
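
Note (after the signature, not part of the patch): the gating logic that the
patched core_has_spare() implements can be illustrated with the standalone
sketch below. The helper name core_has_spare_example() and the sample
utilization, capacity, and ratio values in main() are made up for
illustration only; in the kernel the utilization and capacity come from the
runqueue via cpu_util_cfs() and rq->cpu_capacity.

#include <stdbool.h>
#include <stdio.h>

/*
 * Standalone illustration of the spare-capacity check described in the
 * commit message. "util" and "capacity" stand in for the CFS utilization
 * and CPU capacity of the core's first SMT thread.
 */
static bool core_has_spare_example(unsigned long util, unsigned long capacity,
				   int util_ratio)
{
	/*
	 * ratio == 100: always report spare capacity, so tasks stay on the
	 * primary SMT thread and the sibling thread is left unused.
	 */
	if (util_ratio == 100)
		return true;

	/*
	 * Otherwise the core counts as "spare" while its utilization stays
	 * below util_ratio percent of its capacity.
	 */
	return util * 100 < capacity * (unsigned long)util_ratio;
}

int main(void)
{
	/* Example: 512/1024 = 50% busy, threshold 60% -> has spare (1). */
	printf("%d\n", core_has_spare_example(512, 1024, 60));
	/* Same load with threshold 40% -> no spare capacity (0). */
	printf("%d\n", core_has_spare_example(512, 1024, 40));
	return 0;
}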