hulk inclusion
category: perf
bugzilla: https://atomgit.com/openeuler/kernel/issues/8555

--------------------------------

Implement fast load balancing with a steal-task mechanism within the LLC
domain; if task pulling fails, fall back to the native newidle_balance
mechanism on the LLC and upper scheduling domains. This optimization
prioritizes task scheduling within the LLC domain to improve the cache
hit rate, while mitigating system load imbalance issues.

Signed-off-by: Chen Jinghuang <chenjinghuang2@huawei.com>
---
 kernel/sched/fair.c     | 21 +++++++++++++++++++--
 kernel/sched/features.h |  1 +
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e60f19cb0fee..a4592d35da8b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5236,6 +5236,11 @@ static inline bool steal_enabled(void)
 	return sched_feat(STEAL) && allow;
 }
 
+static inline bool steal_fail_ni_enabled(void)
+{
+	return sched_feat(STEAL_FAIL_NI);
+}
+
 static void overload_clear(struct rq *rq)
 {
 	struct sparsemask *overload_cpus;
@@ -5275,6 +5280,7 @@ static int try_steal(struct rq *this_rq, struct rq_flags *rf);
 static inline int try_steal(struct rq *this_rq, struct rq_flags *rf) { return 0; }
 static inline void overload_clear(struct rq *rq) {}
 static inline void overload_set(struct rq *rq) {}
+static inline bool steal_fail_ni_enabled(void) { return false; }
 #endif
 
 #else /* CONFIG_SMP */
@@ -10343,9 +10349,15 @@ done: __maybe_unused;
 	 */
 	rq_idle_stamp_update(rq);
 
-	new_tasks = newidle_balance(rq, rf);
-	if (new_tasks == 0)
+	if (steal_fail_ni_enabled()) {
 		new_tasks = try_steal(rq, rf);
+		if (new_tasks == 0)
+			new_tasks = newidle_balance(rq, rf);
+	} else {
+		new_tasks = newidle_balance(rq, rf);
+		if (new_tasks == 0)
+			new_tasks = try_steal(rq, rf);
+	}
 
 	schedstat_end_time(rq, time);
 	if (new_tasks)
@@ -14313,6 +14325,11 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 		int continue_balancing = 1;
 		u64 domain_cost;
 
+		if (steal_fail_ni_enabled()) {
+			if (sd->flags & SD_SHARE_PKG_RESOURCES)
+				continue;
+		}
+
 		update_next_balance(sd, &next_balance);
 
 		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 1f665fbf0137..52ea0097c513 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -63,6 +63,7 @@ SCHED_FEAT(SIS_UTIL, true)
  * Improves CPU utilization.
  */
 SCHED_FEAT(STEAL, false)
+SCHED_FEAT(STEAL_FAIL_NI, false)
 #endif
 
 #ifdef CONFIG_SCHED_PARAL
-- 
2.33.0