From: Tejun Heo <tj@kernel.org>

mainline inclusion
from mainline-v6.12-rc1
commit e83edbf88f18087b5b349c7ff701b030dd53dff3
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IDC9YK
Reference: https://web.git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commi...

--------------------------------

Currently, during a task weight change, sched core directly calls
reweight_task() defined in fair.c if @p is on CFS. Let's make it a
proper sched_class operation instead. CFS's reweight_task() is renamed
to reweight_task_fair() and now called through sched_class.

While it turns a direct call into an indirect one, set_load_weight()
isn't called from a hot path and this change shouldn't cause any
noticeable difference. This will be used to implement reweight_task for
a new BPF extensible sched_class so that it can keep its cached task
weight up-to-date.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: David Vernet <dvernet@meta.com>
Acked-by: Josh Don <joshdon@google.com>
Acked-by: Hao Luo <haoluo@google.com>
Acked-by: Barret Rhoden <brho@google.com>
Conflicts:
	kernel/sched/core.c
	kernel/sched/fair.c
	kernel/sched/sched.h
[The correct patch order should be 9059393e4ec1 ("sched/fair: Use
reweight_entity() for set_user_nice()"), e83edbf88f18 ("sched: Add
sched_class->reweight_task()"), d32960528702 ("sched/fair:
set_load_weight() must also call reweight_task() for SCHED_IDLE tasks"),
but the current patch is e83edbf88f18 ("sched: Add
sched_class->reweight_task()"), and the incorrect order caused some
conflicts.]
Signed-off-by: Zicheng Qu <quzicheng@huawei.com>
---
 kernel/sched/core.c  | 4 ++--
 kernel/sched/fair.c  | 3 ++-
 kernel/sched/sched.h | 4 ++--
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9d32d74d65d6..e9608606c184 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1332,8 +1332,8 @@ static void set_load_weight(struct task_struct *p, bool update_load)
 	 * SCHED_OTHER tasks have to update their load when changing their
 	 * weight
 	 */
-	if (update_load && p->sched_class == &fair_sched_class)
-		reweight_task(p, &lw);
+	if (update_load && p->sched_class->reweight_task)
+		p->sched_class->reweight_task(task_rq(p), p, &lw);
 	else
 		p->se.load = lw;
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6b30b3811c88..24d70dbea733 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4012,7 +4012,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	}
 }
 
-void reweight_task(struct task_struct *p, const struct load_weight *lw)
+static void reweight_task_fair(struct rq *rq, struct task_struct *p, const struct load_weight *lw)
 {
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -15316,6 +15316,7 @@ DEFINE_SCHED_CLASS(fair) = {
 	.task_tick		= task_tick_fair,
 	.task_fork		= task_fork_fair,
 
+	.reweight_task		= reweight_task_fair,
 	.prio_changed		= prio_changed_fair,
 	.switched_from		= switched_from_fair,
 	.switched_to		= switched_to_fair,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0e21ad151ec9..f83fbe07c292 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2479,6 +2479,8 @@ struct sched_class {
 	 */
 	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
 	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
+	void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
+			      const struct load_weight *lw);
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
 			      int oldprio);
 
@@ -2638,8 +2640,6 @@ extern void init_sched_dl_class(void);
 extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
 
-extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
-
 extern void resched_curr(struct rq *rq);
 extern void resched_cpu(int cpu);
 
-- 
2.34.1
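
Editor's note, for illustration only (not part of the patch above): a minimal
sketch of how another scheduling class could implement the new
->reweight_task() callback, using the signature added to struct sched_class in
this patch. The function name below is hypothetical, and the body simply
mirrors the fallback behaviour of set_load_weight().

	/* Hypothetical example, not from this patch. */
	static void reweight_task_example(struct rq *rq, struct task_struct *p,
					  const struct load_weight *lw)
	{
		/* Keep the class's cached per-task weight in sync with core. */
		p->se.load = *lw;
	}

	/* Wired up in the class's DEFINE_SCHED_CLASS() initializer, just as
	 * fair does with .reweight_task = reweight_task_fair. */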