
From: He Yujie <coka.heyujie@huawei.com> Offering: HULK hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IB42TC -------------------------------- After the preferred_cpu goes offline, the core selection of dynamic affinity does not check whether the preferred_cpu is valid. As a result, related dynamic affinity processes are concentrated on a shared_cpu. This patch resolves the problem by checking whether each preferred_cpu is valid (online) and comparing the usage threshold only against the valid preferred_cpus. Fixes: 2e1dfc02d115 ("sched: Adjust wakeup cpu range according CPU util dynamicly") Signed-off-by: He Yujie <coka.heyujie@huawei.com> Signed-off-by: Cheng Yu <serein.chengyu@huawei.com> --- kernel/sched/fair.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fdb352c870ea..f83094121207 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9149,7 +9149,7 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu, /* manual mode */ tg = task_group(p); - for_each_cpu(cpu, p->prefer_cpus) { + for_each_cpu_and(cpu, p->prefer_cpus, cpu_online_mask) { if (idlest_cpu && (available_idle_cpu(cpu) || sched_idle_cpu(cpu))) { *idlest_cpu = cpu; } else if (idlest_cpu) { @@ -9174,8 +9174,7 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu, } rcu_read_unlock(); - if (tg_capacity > cpumask_weight(p->prefer_cpus) && - util_avg_sum * 100 <= tg_capacity * sysctl_sched_util_low_pct) { + if (util_avg_sum * 100 < tg_capacity * sysctl_sched_util_low_pct) { p->select_cpus = p->prefer_cpus; if (sd_flag & SD_BALANCE_WAKE) schedstat_inc(p->stats.nr_wakeups_preferred_cpus); -- 2.25.1