From: Dietmar Eggemann <dietmar.eggemann@arm.com>
Subject: sched/deadline: Merge dl_task_can_attach() and dl_cpu_busy()
stable inclusion
from stable-v5.10.137
commit 0039189a3b15dbf71965261ec05bd9b279ad9498
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60PLB
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=0039189a3b15dbf71965261ec05bd9b279ad9498
--------------------------------
[ Upstream commit 772b6539fdda31462cc08368e78df60b31a58bab ]
Both functions do almost the same thing: they check whether admission control is still respected.
With exclusive cpusets, dl_task_can_attach() checks whether the destination cpuset (i.e. its root domain) has enough CPU capacity to accommodate the task.

dl_cpu_busy() checks whether there is enough CPU capacity in the cpuset in case the CPU is hot-plugged out.
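Both checks reduce to the same bandwidth-versus-capacity test. For reference, a minimal sketch of that test, modeled on the __dl_overflow()/cap_scale() helpers in this kernel's kernel/sched/sched.h (illustrative only, not a verbatim copy; locking and the callers' setup are omitted):

  /*
   * Overflow unless the DEADLINE bandwidth already accounted in this
   * root domain (dl_b->total_bw), minus bandwidth being released
   * (old_bw), plus bandwidth being added (new_bw), fits in the
   * allowed fraction (dl_b->bw) of the rd's CPU capacity (cap).
   * dl_b->bw == -1 means admission control is disabled.
   */
  static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
                                   u64 old_bw, u64 new_bw)
  {
          return dl_b->bw != -1 &&
                 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
  }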
dl_task_can_attach() is used to check whether a task can be admitted, while dl_cpu_busy() is used to check whether a CPU can be hot-plugged out.
Make dl_cpu_busy() able to deal with a task and use it instead of dl_task_can_attach() in task_can_attach().
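After this change the merged helper has roughly the following shape (condensed from the deadline.c hunk below; locking and RCU details left out):

  int dl_cpu_busy(int cpu, struct task_struct *p)
  {
          struct dl_bw *dl_b = dl_bw_of(cpu);
          unsigned long cap = dl_bw_capacity(cpu);
          bool overflow;

          /* p == NULL: CPU hotplug check; p != NULL: admission check for p */
          overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
          if (!overflow && p)
                  /* Reserve p's bandwidth in the destination root domain. */
                  __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));

          return overflow ? -EBUSY : 0;
  }

Callers now get an error code directly: cpuset_cpu_inactive() passes NULL and forwards the result, while task_can_attach() passes the task being attached.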
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lore.kernel.org/r/20220302183433.333029-4-dietmar.eggemann@arm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Wei Li <liwei391@huawei.com>
---
 kernel/sched/core.c     | 13 +++++++----
 kernel/sched/deadline.c | 52 +++++++++++------------------------------
 kernel/sched/sched.h    |  3 +--
 3 files changed, 24 insertions(+), 44 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c936c042297b..87049229867e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7680,8 +7680,11 @@ int task_can_attach(struct task_struct *p,
 	}
 
 	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
-					      cs_cpus_allowed))
-		ret = dl_task_can_attach(p, cs_cpus_allowed);
+					      cs_cpus_allowed)) {
+		int cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
+
+		ret = dl_cpu_busy(cpu, p);
+	}
 
 out:
 	return ret;
@@ -7940,8 +7943,10 @@ static void cpuset_cpu_active(void)
 static int cpuset_cpu_inactive(unsigned int cpu)
 {
 	if (!cpuhp_tasks_frozen) {
-		if (dl_cpu_busy(cpu))
-			return -EBUSY;
+		int ret = dl_cpu_busy(cpu, NULL);
+
+		if (ret)
+			return ret;
 		cpuset_update_active_cpus();
 	} else {
 		num_cpus_frozen++;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a6b69fae752e..c1994154920f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2843,41 +2843,6 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
 }
 
 #ifdef CONFIG_SMP
-int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
-{
-	unsigned long flags, cap;
-	unsigned int dest_cpu;
-	struct dl_bw *dl_b;
-	bool overflow;
-	int ret;
-
-	dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
-
-	rcu_read_lock_sched();
-	dl_b = dl_bw_of(dest_cpu);
-	raw_spin_lock_irqsave(&dl_b->lock, flags);
-	cap = dl_bw_capacity(dest_cpu);
-	overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
-	if (overflow) {
-		ret = -EBUSY;
-	} else {
-		/*
-		 * We reserve space for this task in the destination
-		 * root_domain, as we can't fail after this point.
-		 * We will free resources in the source root_domain
-		 * later on (see set_cpus_allowed_dl()).
-		 */
-		int cpus = dl_bw_cpus(dest_cpu);
-
-		__dl_add(dl_b, p->dl.dl_bw, cpus);
-		ret = 0;
-	}
-	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-	rcu_read_unlock_sched();
-
-	return ret;
-}
-
 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
 				 const struct cpumask *trial)
 {
@@ -2899,7 +2864,7 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
 	return ret;
 }
 
-bool dl_cpu_busy(unsigned int cpu)
+int dl_cpu_busy(int cpu, struct task_struct *p)
 {
 	unsigned long flags, cap;
 	struct dl_bw *dl_b;
@@ -2909,11 +2874,22 @@ bool dl_cpu_busy(unsigned int cpu)
 	rcu_read_lock_sched();
 	dl_b = dl_bw_of(cpu);
 	raw_spin_lock_irqsave(&dl_b->lock, flags);
 	cap = dl_bw_capacity(cpu);
-	overflow = __dl_overflow(dl_b, cap, 0, 0);
+	overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
+
+	if (!overflow && p) {
+		/*
+		 * We reserve space for this task in the destination
+		 * root_domain, as we can't fail after this point.
+		 * We will free resources in the source root_domain
+		 * later on (see set_cpus_allowed_dl()).
+		 */
+		__dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
+	}
+
 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	rcu_read_unlock_sched();
 
-	return overflow;
+	return overflow ? -EBUSY : 0;
 }
 #endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3bd6c988652a..ad00ba5f6d82 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -351,9 +351,8 @@ extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
 extern bool __checkparam_dl(const struct sched_attr *attr);
 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
-extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern bool dl_cpu_busy(unsigned int cpu);
+extern int dl_cpu_busy(int cpu, struct task_struct *p);
 
 #ifdef CONFIG_CGROUP_SCHED