
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IC8X6H

--------------------------------

Honor a task group's soft domain in the slow wakeup path: skip sched
groups and CPUs outside the soft domain span in find_idlest_group()
and find_idlest_group_cpu(), and let wake_soft_domain() report the
preferred CPU for SD_BALANCE_FORK wakeups.

Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
---
 kernel/sched/fair.c | 65 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 62 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 56f407770be0..5a2fb1734dc2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7109,6 +7109,55 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
 
+#ifdef CONFIG_SCHED_SOFT_DOMAIN
+static inline bool sched_group_sf_preferred(struct task_struct *p, struct sched_group *group)
+{
+	struct soft_domain_ctx *ctx = NULL;
+
+	if (!sched_feat(SOFT_DOMAIN))
+		return true;
+
+	ctx = task_group(p)->sf_ctx;
+	if (!ctx || ctx->policy == 0)
+		return true;
+
+	if (!cpumask_intersects(sched_group_span(group), to_cpumask(ctx->span)))
+		return false;
+
+	return true;
+}
+
+static inline bool cpu_is_sf_preferred(struct task_struct *p, int cpu)
+{
+	struct soft_domain_ctx *ctx = NULL;
+
+	if (!sched_feat(SOFT_DOMAIN))
+		return true;
+
+	ctx = task_group(p)->sf_ctx;
+	if (!ctx || ctx->policy == 0)
+		return true;
+
+	if (!cpumask_test_cpu(cpu, to_cpumask(ctx->span)))
+		return false;
+
+	return true;
+}
+#else
+
+static inline bool sched_group_sf_preferred(struct task_struct *p, struct sched_group *group)
+{
+	return true;
+}
+
+static inline bool cpu_is_sf_preferred(struct task_struct *p, int cpu)
+{
+	return true;
+}
+
+#endif
+
+
 /*
  * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
  */
@@ -7137,6 +7186,9 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
 		if (!sched_core_cookie_match(rq, p))
 			continue;
 
+		if (!cpu_is_sf_preferred(p, i))
+			continue;
+
 		if (sched_idle_cpu(i))
 			return i;
 
@@ -8240,7 +8292,7 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
 #endif
 
 #ifdef CONFIG_SCHED_SOFT_DOMAIN
-static int wake_soft_domain(struct task_struct *p, int target)
+static int wake_soft_domain(struct task_struct *p, int target, int *cpu, int sd_flags)
 {
 	struct cpumask *mask = this_cpu_cpumask_var_ptr(select_idle_mask);
 	struct soft_domain_ctx *ctx = NULL;
@@ -8256,10 +8308,13 @@ static int wake_soft_domain(struct task_struct *p, int target)
 #endif
 	cpumask_and(mask, mask, cpu_active_mask);
 	if (cpumask_empty(mask) || cpumask_test_cpu(target, mask))
-		goto out;
+		goto prefer;
 	else
 		target = cpumask_any_and_distribute(mask, mask);
 
+prefer:
+	if (sd_flags & SD_BALANCE_FORK)
+		*cpu = target;
 
 out:
 	return target;
@@ -8331,7 +8386,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 
 #ifdef CONFIG_SCHED_SOFT_DOMAIN
 	if (sched_feat(SOFT_DOMAIN))
-		new_cpu = prev_cpu = wake_soft_domain(p, prev_cpu);
+		new_cpu = prev_cpu = wake_soft_domain(p, prev_cpu, &cpu, sd_flag);
 #endif
 #ifdef CONFIG_BPF_SCHED
 	if (bpf_sched_enabled()) {
@@ -11321,6 +11376,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group))
 			continue;
 
+		/* Skip over this group if not in soft domain */
+		if (!sched_group_sf_preferred(p, group))
+			continue;
+
 		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_span(group));
 
-- 
2.18.0.huawei.25