From: Vineeth Pillai <viremana@linux.microsoft.com>
mainline inclusion
from mainline-v5.14-rc1
commit 8039e96fcc1de30d5bcaf05da9ca2de46a800826
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5OOWG
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------------------------------------------------
If there is only one long-running local task and the sibling is forced idle, the forced-idle sibling might not get a chance to run until a schedule event happens on any CPU in the core.

So we check for this condition during a tick to see whether a sibling is starved, and if so give it a chance to schedule.
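As an aside (not part of the patch): the __entity_slice_used() helper added below avoids a division by testing rtime * min_nr_tasks > slice, which is equivalent to rtime > slice / min_nr_tasks. A minimal standalone userspace sketch of that arithmetic, with an assumed 6 ms slice:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/*
 * Mirrors the patch's __entity_slice_used(): the multiplication is
 * equivalent to rtime > slice / min_nr_tasks, without a division.
 */
static bool entity_slice_used(uint64_t rtime_ns, uint64_t slice_ns,
                              int min_nr_tasks)
{
        return rtime_ns * min_nr_tasks > slice_ns;
}

int main(void)
{
        uint64_t slice = 6000000ULL;    /* assumed 6 ms slice */

        /*
         * With MIN_NR_TASKS_DURING_FORCEIDLE == 2, the current task
         * offers to reschedule once it has used more than half its slice.
         */
        printf("ran 2 ms: %d\n", entity_slice_used(2000000ULL, slice, 2)); /* 0 */
        printf("ran 4 ms: %d\n", entity_slice_used(4000000ULL, slice, 2)); /* 1 */
        return 0;
}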
Signed-off-by: Vineeth Pillai <viremana@linux.microsoft.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.617407840@infradead.org
Signed-off-by: Lin Shengwang <linshengwang1@huawei.com>
Reviewed-by: lihua <hucool.lihua@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 kernel/sched/core.c  | 15 ++++++++-------
 kernel/sched/fair.c  | 40 ++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h |  2 +-
 3 files changed, 49 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a010117eb540..1f9e156fe6ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4894,16 +4894,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	/* reset state */
 	rq->core->core_cookie = 0UL;
+	if (rq->core->core_forceidle) {
+		need_sync = true;
+		rq->core->core_forceidle = false;
+	}
 	for_each_cpu(i, smt_mask) {
 		struct rq *rq_i = cpu_rq(i);
 
 		rq_i->core_pick = NULL;
 
-		if (rq_i->core_forceidle) {
-			need_sync = true;
-			rq_i->core_forceidle = false;
-		}
-
 		if (i != cpu)
 			update_rq_clock(rq_i);
 	}
@@ -5023,8 +5022,10 @@ next_class:;
 		if (!rq_i->core_pick)
 			continue;
 
-		if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running)
-			rq_i->core_forceidle = true;
+		if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running &&
+		    !rq_i->core->core_forceidle) {
+			rq_i->core->core_forceidle = true;
+		}
 
 		if (i == cpu) {
 			rq_i->core_pick = NULL;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 21ef241894e2..a2651e159c23 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11533,6 +11533,44 @@ static void rq_offline_fair(struct rq *rq)
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_CORE
+static inline bool
+__entity_slice_used(struct sched_entity *se, int min_nr_tasks)
+{
+	u64 slice = sched_slice(cfs_rq_of(se), se);
+	u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+
+	return (rtime * min_nr_tasks > slice);
+}
+
+#define MIN_NR_TASKS_DURING_FORCEIDLE	2
+static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
+{
+	if (!sched_core_enabled(rq))
+		return;
+
+	/*
+	 * If runqueue has only one task which used up its slice and
+	 * if the sibling is forced idle, then trigger schedule to
+	 * give forced idle task a chance.
+	 *
+	 * sched_slice() considers only this active rq and it gets the
+	 * whole slice. But during force idle, we have siblings acting
+	 * like a single runqueue and hence we need to consider runnable
+	 * tasks on this cpu and the forced idle cpu. Ideally, we should
+	 * go through the forced idle rq, but that would be a perf hit.
+	 * We can assume that the forced idle cpu has at least
+	 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
+	 * if we need to give up the cpu.
+	 */
+	if (rq->core->core_forceidle && rq->cfs.nr_running == 1 &&
+	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
+		resched_curr(rq);
+}
+#else
+static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
+#endif
+
 /*
  * scheduler tick hitting a task of our scheduling class.
  *
@@ -11556,6 +11594,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 
 	update_misfit_status(curr, rq);
 	update_overutilized_status(task_rq(curr));
+
+	task_tick_core(rq, curr);
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1bbe70210e5d..044aded8314c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1114,12 +1114,12 @@ struct rq {
 	unsigned int		core_enabled;
 	unsigned int		core_sched_seq;
 	struct rb_root		core_tree;
-	unsigned char		core_forceidle;
 
 	/* shared state */
 	unsigned int		core_task_seq;
 	unsigned int		core_pick_seq;
 	unsigned long		core_cookie;
+	unsigned char		core_forceidle;
 #endif
 
 	KABI_RESERVE(1)