From: Peter Zijlstra <peterz@infradead.org>

mainline inclusion
from mainline-v6.12-rc1
commit dae4320b29f0bbdae93f7c1f6f80b19f109ca0bc
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IDC9YK
Reference: https://web.git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commi...

--------------------------------

The rule is that:

	pick_next_task() := pick_task() + set_next_task(.first = true)

Turns out, there's still a few things in pick_next_task() that are
missing from that combination.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240813224015.724111109@infradead.org

Conflicts:
	kernel/sched/fair.c
	kernel/sched/deadline.c
[fair: plenty of conflicts with 89bf80a4d6d5 ("sched: Introduce priority
load balance for qos scheduler"), a3c9f2da0a35 ("sched: Introduce handle
priority reversion mechanism") and ceea34dd528c ("sched: Implement the
function of qos smt expeller"); resolved them based on the code logic.
Also removed the warn, because this tree does not contain the delayed
queue. Removed the `se->sched_delayed` check, because 82e9d0456e06
("sched/fair: Avoid re-setting virtual deadline on 'migrations'") is not
merged.
deadline: 63ba8422f876 ("sched/deadline: Introduce deadline servers") is
not merged, so the changes in deadline.c are ignored.]
Signed-off-by: Zicheng Qu <quzicheng@huawei.com>
---
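[Note, not part of the upstream commit: as a minimal userspace sketch of
the rule above -- kernel types are stubbed out here, and the function
names only mirror the real fair-class hooks -- the .first flag is what
gates the pick-time epilogue that this patch consolidates:]

#include <stdbool.h>
#include <stdio.h>

struct rq { int cpu; };
struct task_struct { const char *comm; };

static struct task_struct demo_task = { .comm = "demo" };

/* pick_task(): choose a candidate without committing to it */
static struct task_struct *pick_task(struct rq *rq)
{
	return &demo_task;
}

/* set_next_task(): commit to the pick; extras run only when first */
static void set_next_task(struct rq *rq, struct task_struct *p, bool first)
{
	printf("cpu%d: %s is now curr (MRU-rotate cfs_tasks)\n",
	       rq->cpu, p->comm);

	if (!first)
		return;

	/* only a genuine pick runs these: hrtick, misfit update, qos hooks */
	printf("cpu%d: first-pick epilogue for %s\n", rq->cpu, p->comm);
}

/* The rule: pick_next_task() := pick_task() + set_next_task(.first = true) */
static struct task_struct *pick_next_task(struct rq *rq)
{
	struct task_struct *p = pick_task(rq);

	set_next_task(rq, p, true);
	return p;
}

int main(void)
{
	struct rq rq = { .cpu = 0 };

	pick_next_task(&rq);
	return 0;
}

[With that shape, every path out of pick_next_task_fair() funnels
through __set_next_task_fair(.first == true) instead of the old done:
label, which is what the hunks below implement.]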
 kernel/sched/fair.c | 79 ++++++++++++++++++++++-----------------------
 1 file changed, 39 insertions(+), 40 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 508fbe8cbcf8..1b40cf7aecab 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10212,6 +10212,9 @@ static struct task_struct *pick_task_fair(struct rq *rq)
 	return task_of(se);
 }
 
+static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
+
 struct task_struct *
 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -10284,9 +10287,11 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 
 		put_prev_entity(cfs_rq, pse);
 		set_next_entity(cfs_rq, se);
+
+		__set_next_task_fair(rq, p, true);
 	}
 
-	goto done;
+	return p;
 
 #ifdef CONFIG_QOS_SCHED
 qos_simple:
@@ -10312,45 +10317,15 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 		se = parent_entity(se);
 	}
 
-	goto done;
+	__set_next_task_fair(rq, p, true);
+	return p;
 #endif
 
 simple:
 #endif
 	if (prev)
 		put_prev_task(rq, prev);
-
-	for_each_sched_entity(se)
-		set_next_entity(cfs_rq_of(se), se);
-
-done: __maybe_unused;
-#ifdef CONFIG_SMP
-	/*
-	 * Move the next running task to the front of
-	 * the list, so our cfs_tasks list becomes MRU
-	 * one.
-	 */
-#ifdef CONFIG_QOS_SCHED_PRIO_LB
-	adjust_rq_cfs_tasks(list_move, rq, &p->se);
-#else
-	list_move(&p->se.group_node, &rq->cfs_tasks);
-#endif
-#endif
-
-	if (hrtick_enabled_fair(rq))
-		hrtick_start_fair(rq, p);
-
-	update_misfit_status(p, rq);
-	sched_fair_update_stop_tick(rq, p);
-
-#ifdef CONFIG_QOS_SCHED
-	qos_schedule_throttle(p);
-#endif
-
-#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
-	qos_smt_expel(this_cpu, p);
-#endif
-
+	set_next_task_fair(rq, p, true);
 	return p;
 
 idle:
@@ -14898,12 +14873,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 	}
 }
 
-/* Account for a task changing its policy or group.
- *
- * This routine is mostly called to set cfs_rq->curr field when a task
- * migrates between groups/classes.
- */
-static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 {
 	struct sched_entity *se = &p->se;
 
@@ -14920,6 +14890,33 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 #endif
 	}
 #endif
+	if (!first)
+		return;
+
+	if (hrtick_enabled_fair(rq))
+		hrtick_start_fair(rq, p);
+
+	update_misfit_status(p, rq);
+	sched_fair_update_stop_tick(rq, p);
+
+#ifdef CONFIG_QOS_SCHED
+	qos_schedule_throttle(p);
+#endif
+
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	qos_smt_expel(rq->cpu, p);
+#endif
+}
+
+/*
+ * Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set cfs_rq->curr field when a task
+ * migrates between groups/classes.
+ */
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+{
+	struct sched_entity *se = &p->se;
 
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -14928,6 +14925,8 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 		set_next_entity(cfs_rq, se);
 		/* ensure bandwidth has been allocated on our new cfs_rq */
 		account_cfs_rq_runtime(cfs_rq, 0);
 	}
+
+	__set_next_task_fair(rq, p, first);
 }
 
 void init_cfs_rq(struct cfs_rq *cfs_rq)
-- 
2.34.1