From: Tejun Heo <tj@kernel.org>

mainline inclusion
from mainline-v6.12-rc1
commit a735d43c7f85d112a6aefd72973188d0626e4464
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IDC9YK
Reference: https://web.git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commi...

--------------------------------

On SMP, SCX performs dispatch from sched_class->balance(). As balance()
was not available in UP, it instead called the internal balance function
from put_prev_task_scx() and pick_next_task_scx() to emulate the effect,
which is rather nasty.

Enabling sched_class->balance() on UP shouldn't cause any meaningful
overhead. Enable balance() on UP and drop the ugly workaround.

Signed-off-by: Tejun Heo <tj@kernel.org>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David Vernet <void@manifault.com>
Signed-off-by: Zicheng Qu <quzicheng@huawei.com>
---
 kernel/sched/core.c  |  4 +---
 kernel/sched/ext.c   | 41 +----------------------------------------
 kernel/sched/sched.h |  2 +-
 3 files changed, 3 insertions(+), 44 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e24aa71934bd..e15264735937 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5889,7 +5889,6 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
 				  struct rq_flags *rf)
 {
-#ifdef CONFIG_SMP
 	const struct sched_class *start_class = prev->sched_class;
 	const struct sched_class *class;
 
@@ -5912,10 +5911,9 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
 	 * a runnable task of @class priority or higher.
 	 */
 	for_active_class_range(class, start_class, &idle_sched_class) {
-		if (class->balance(rq, prev, rf))
+		if (class->balance && class->balance(rq, prev, rf))
 			break;
 	}
-#endif
 
 	put_prev_task(rq, prev);
 }
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 00bbe0db4649..645d12be385a 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -2626,7 +2626,6 @@ static int balance_one(struct rq *rq, struct task_struct *prev, bool local)
 	return has_tasks;
 }
 
-#ifdef CONFIG_SMP
 static int balance_scx(struct rq *rq, struct task_struct *prev,
 		       struct rq_flags *rf)
 {
@@ -2660,7 +2659,6 @@ static int balance_scx(struct rq *rq, struct task_struct *prev,
 
 	return ret;
 }
-#endif
 
 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
 {
@@ -2729,37 +2727,6 @@ static void process_ddsp_deferred_locals(struct rq *rq)
 
 static void put_prev_task_scx(struct rq *rq, struct task_struct *p)
 {
-#ifndef CONFIG_SMP
-	/*
-	 * UP workaround.
-	 *
-	 * Because SCX may transfer tasks across CPUs during dispatch, dispatch
-	 * is performed from its balance operation which isn't called in UP.
-	 * Let's work around by calling it from the operations which come right
-	 * after.
-	 *
-	 * 1. If the prev task is on SCX, pick_next_task() calls
-	 *    .put_prev_task() right after. As .put_prev_task() is also called
-	 *    from other places, we need to distinguish the calls which can be
-	 *    done by looking at the previous task's state - if still queued or
-	 *    dequeued with %SCX_DEQ_SLEEP, the caller must be pick_next_task().
-	 *    This case is handled here.
-	 *
-	 * 2. If the prev task is not on SCX, the first following call into SCX
-	 *    will be .pick_next_task(), which is covered by calling
-	 *    balance_scx() from pick_next_task_scx().
-	 *
-	 * Note that we can't merge the first case into the second as
-	 * balance_scx() must be called before the previous SCX task goes
-	 * through put_prev_task_scx().
-	 *
-	 * @rq is pinned and can't be unlocked. As UP doesn't transfer tasks
-	 * around, balance_one() doesn't need to.
-	 */
-	if (p->scx.flags & (SCX_TASK_QUEUED | SCX_TASK_DEQD_FOR_SLEEP))
-		balance_one(rq, p, true);
-#endif
-
 	update_curr_scx(rq);
 
 	/* see dequeue_task_scx() on why we skip when !QUEUED */
@@ -2817,12 +2784,6 @@ static struct task_struct *pick_next_task_scx(struct rq *rq)
 {
 	struct task_struct *p;
 
-#ifndef CONFIG_SMP
-	/* UP workaround - see the comment at the head of put_prev_task_scx() */
-	if (unlikely(rq->curr->sched_class != &ext_sched_class))
-		balance_one(rq, rq->curr, true);
-#endif
-
 	p = first_local_task(rq);
 	if (!p)
 		return NULL;
@@ -3682,6 +3643,7 @@ DEFINE_SCHED_CLASS(ext) = {
 
 	.wakeup_preempt		= wakeup_preempt_scx,
 
+	.balance		= balance_scx,
 	.pick_next_task		= pick_next_task_scx,
 
 	.put_prev_task		= put_prev_task_scx,
@@ -3690,7 +3652,6 @@ DEFINE_SCHED_CLASS(ext) = {
 	.switch_class		= switch_class_scx,
 
 #ifdef CONFIG_SMP
-	.balance		= balance_scx,
 	.select_task_rq		= select_task_rq_scx,
 	.task_woken		= task_woken_scx,
 	.set_cpus_allowed	= set_cpus_allowed_scx,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index bd0ad73ec51e..45776570014b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2527,13 +2527,13 @@ struct sched_class {
 	KABI_REPLACE(void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags),
 		     void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags))
 
+	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
 	struct task_struct *(*pick_next_task)(struct rq *rq);
 
 	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
 	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
 
 #ifdef CONFIG_SMP
-	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
 	int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
 
 	struct task_struct * (*pick_task)(struct rq *rq);
-- 
2.34.1
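For readers following the backport, the crux of the change is that ->balance()
becomes an ordinary, optional sched_class callback: put_prev_task_balance() now
invokes it only when a class actually implements it, so classes without a
balance operation need no stubs and no CONFIG_SMP guards. Below is a small
standalone C sketch of that optional-callback pattern. It is illustration only,
not kernel code; every identifier in it (demo_class, demo_ext_balance, and so
on) is invented for the example.

/*
 * Standalone illustration of the pattern used by put_prev_task_balance()
 * above: walk the class list in priority order and invoke ->balance()
 * only when a class provides one.  Not kernel code; all names are
 * invented for the example.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_class {
	const char *name;
	/* optional: NULL when the class has no balance operation */
	bool (*balance)(void);
};

static bool demo_ext_balance(void)
{
	/* pretend we dispatched something runnable onto the local queue */
	return true;
}

static const struct demo_class classes[] = {
	{ .name = "no-balance-class", .balance = NULL },
	{ .name = "ext-like-class",   .balance = demo_ext_balance },
};

static void demo_put_prev_task_balance(void)
{
	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		const struct demo_class *class = &classes[i];

		/* mirrors: if (class->balance && class->balance(rq, prev, rf)) break; */
		if (!class->balance) {
			printf("%s: no balance callback, skipping\n", class->name);
			continue;
		}
		if (class->balance()) {
			printf("%s: has runnable work, stop iterating\n", class->name);
			break;
		}
	}
}

int main(void)
{
	demo_put_prev_task_balance();
	return 0;
}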