From: Peter Zijlstra <peterz@infradead.org> mainline inclusion from mainline-v6.12-rc1 commit b2d70222dbf2a2ff7a972a685d249a5d75afa87f category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID7HLY Reference: https://web.git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b2d70222dbf2a2ff7a972a685d249a5d75afa87f ------------------------------- In order to tell the previous sched_class what the next task is, add put_prev_task(.next). Notable SCX will use this to: 1) determine the next task will leave the SCX sched class and push the current task to another CPU if possible. 2) statistics on how often and which other classes preempt it Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20240813224016.367421076@infradead.org Conflicts: kernel/sched/ext.c kernel/sched/deadline.c [This mainline patch changes the interface put_prev_task, but did not change the implementation put_prev_task_scx, so modify it together. For ext.c, the patch d7b01aef9dbd ("Merge branch 'tip/sched/core' into for-6.12") is not merged in, so add the necessary code here.] 
Signed-off-by: Zicheng Qu <quzicheng@huawei.com> Signed-off-by: cheliequan <cheliequan@inspur.com> Signed-off-by: Luo Gengkun <luogengkun2@huawei.com> --- kernel/sched/deadline.c | 2 +- kernel/sched/ext.c | 7 ++++--- kernel/sched/fair.c | 2 +- kernel/sched/idle.c | 2 +- kernel/sched/rt.c | 2 +- kernel/sched/sched.h | 6 +++--- kernel/sched/stop_task.c | 2 +- 7 files changed, 12 insertions(+), 11 deletions(-) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index e1b627da0efa..d55e3105e62d 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2031,7 +2031,7 @@ static struct task_struct *pick_task_dl(struct rq *rq) return p; } -static void put_prev_task_dl(struct rq *rq, struct task_struct *p) +static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next) { struct sched_dl_entity *dl_se = &p->dl; struct dl_rq *dl_rq = &rq->dl; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index e611bf6b4950..93008e4a1e2e 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -2728,7 +2728,8 @@ static void process_ddsp_deferred_locals(struct rq *rq) } } -static void put_prev_task_scx(struct rq *rq, struct task_struct *p) +static void put_prev_task_scx(struct rq *rq, struct task_struct *p, + struct task_struct *next) { update_curr_scx(rq); @@ -2789,14 +2790,14 @@ static struct task_struct *pick_next_task_scx(struct rq *rq, struct task_struct *p; if (prev->sched_class == &ext_sched_class) - put_prev_task_scx(rq, prev); + put_prev_task_scx(rq, prev, NULL); p = first_local_task(rq); if (!p) return NULL; if (prev->sched_class != &ext_sched_class) - prev->sched_class->put_prev_task(rq, prev); + prev->sched_class->put_prev_task(rq, prev, p); set_next_task_scx(rq, p, true); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4cb872d91ecb..a7eb4ad50cb3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -10389,7 +10389,7 @@ static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_stru 
/* * Account for a descheduled task: */ -static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) +static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct task_struct *next) { struct sched_entity *se = &prev->se; struct cfs_rq *cfs_rq; diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 12081077893e..26e8c5061cff 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -446,7 +446,7 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags) resched_curr(rq); } -static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) +static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next) { scx_update_idle(rq, false); } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 62058bf546f0..8fcf63588472 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1808,7 +1808,7 @@ static struct task_struct *pick_task_rt(struct rq *rq) return p; } -static void put_prev_task_rt(struct rq *rq, struct task_struct *p) +static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next) { struct sched_rt_entity *rt_se = &p->rt; struct rt_rq *rt_rq = &rq->rt; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index a2de66a260f4..895dab1b2d33 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2549,7 +2549,7 @@ struct sched_class { KABI_REPLACE(struct task_struct *(*pick_next_task)(struct rq *rq), struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev)) - void (*put_prev_task)(struct rq *rq, struct task_struct *p); + void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next); void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); #ifdef CONFIG_SMP @@ -2608,7 +2608,7 @@ struct sched_class { static inline void put_prev_task(struct rq *rq, struct task_struct *prev) { WARN_ON_ONCE(rq->curr != prev); - prev->sched_class->put_prev_task(rq, prev); + 
prev->sched_class->put_prev_task(rq, prev, NULL); } static inline void set_next_task(struct rq *rq, struct task_struct *next) @@ -2625,7 +2625,7 @@ static inline void put_prev_set_next_task(struct rq *rq, if (next == prev) return; - prev->sched_class->put_prev_task(rq, prev); + prev->sched_class->put_prev_task(rq, prev, next); next->sched_class->set_next_task(rq, next, true); } diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 0fd5352ff0ce..058dd42e3d9b 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -59,7 +59,7 @@ static void yield_task_stop(struct rq *rq) BUG(); /* the stop task should never yield, its pointless. */ } -static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) +static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next) { update_curr_common(rq); } -- 2.34.1