From: Peter Zijlstra <peterz@infradead.org>

mainline inclusion
from mainline-v7.0-rc2
commit bcd74b2ffdd0a2233adbf26b65c62fc69a809c8e
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8903
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=bcd74b2ffdd0a2233adbf26b65c62fc69a809c8e

--------------------------------

We should not (re)set slice protection in the sched_change pattern
which calls put_prev_task() / set_next_task().

Fixes: 63304558ba5d ("sched/eevdf: Curb wakeup-preemption")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
Link: https://patch.msgid.link/20260219080624.561421378%40infradead.org
Conflicts:
	kernel/sched/fair.c
[set_protect_slice() only has one parameter, so keep it original.]
Signed-off-by: Zicheng Qu <quzicheng@huawei.com>
---
 kernel/sched/fair.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9c2fc59a8c58..337478725843 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5609,7 +5609,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 }
 
 static void
-set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool first)
 {
 	clear_buddies(cfs_rq, se);
 
@@ -5624,7 +5624,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		__dequeue_entity(cfs_rq, se);
 		update_load_avg(cfs_rq, se, UPDATE_TG);
 
-		set_protect_slice(se);
+		if (first)
+			set_protect_slice(se);
 	}
 
 	update_stats_curr_start(cfs_rq, se);
@@ -10296,13 +10297,13 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 				pse = parent_entity(pse);
 			}
 			if (se_depth >= pse_depth) {
-				set_next_entity(cfs_rq_of(se), se);
+				set_next_entity(cfs_rq_of(se), se, true);
 				se = parent_entity(se);
 			}
 		}
 
 		put_prev_entity(cfs_rq, pse);
-		set_next_entity(cfs_rq, se);
+		set_next_entity(cfs_rq, se, true);
 	}
 
 	goto done;
@@ -10327,7 +10328,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 	p = task_of(se);
 
 	while (se) {
-		set_next_entity(cfs_rq_of(se), se);
+		set_next_entity(cfs_rq_of(se), se, true);
 		se = parent_entity(se);
 	}
 
@@ -10341,7 +10342,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 
 	do {
 		se = pick_next_entity(cfs_rq, NULL);
-		set_next_entity(cfs_rq, se);
+		set_next_entity(cfs_rq, se, true);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
 
@@ -15185,7 +15186,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		set_next_entity(cfs_rq, se);
+		set_next_entity(cfs_rq, se, first);
 		/* ensure bandwidth has been allocated on our new cfs_rq */
 		account_cfs_rq_runtime(cfs_rq, 0);
 	}
-- 
2.34.1