[PATCH] sched/fair: Prefer stealing a task within the LLC, falling back to native newidle_balance on failure
---
 kernel/sched/fair.c     | 15 +++++++++++++--
 kernel/sched/features.h |  1 +
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e60f19cb0fee..70ba6ee3c058 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10343,9 +10343,15 @@ done: __maybe_unused;
 	 */
 	rq_idle_stamp_update(rq);

-	new_tasks = newidle_balance(rq, rf);
-	if (new_tasks == 0)
+	if (sched_feat(STEALPOC)) {
 		new_tasks = try_steal(rq, rf);
+		if (new_tasks == 0)
+			new_tasks = newidle_balance(rq, rf);
+	} else {
+		new_tasks = newidle_balance(rq, rf);
+		if (new_tasks == 0)
+			new_tasks = try_steal(rq, rf);
+	}

 	schedstat_end_time(rq, time);

 	if (new_tasks)
@@ -14313,6 +14319,11 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 		int continue_balancing = 1;
 		u64 domain_cost;

+		if (sched_feat(STEALPOC)) {
+			if (sd->flags & SD_SHARE_PKG_RESOURCES)
+				continue;
+		}
+
 		update_next_balance(sd, &next_balance);

 		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 1f665fbf0137..c74329fd86b9 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -63,6 +63,7 @@ SCHED_FEAT(SIS_UTIL, true)
  * Improves CPU utilization.
  */
 SCHED_FEAT(STEAL, false)
+SCHED_FEAT(STEALPOC, false)
 #endif

 #ifdef CONFIG_SCHED_PARAL
--
2.34.1
participants (1)
-
Chen Jinghuang