From: Peter Zijlstra <peterz@infradead.org>
stable inclusion
from stable-v6.6.2
commit d03b4817436286eeaa1ecb3606064fca31095fd2
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8IW7G
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
[ Upstream commit f0498d2a54e7966ce23cd7c7ff42c64fa0059b07 ]
Kuyo reported sporadic failures on a sched_setaffinity() vs CPU hotplug stress-test -- notably affine_move_task() remains stuck in wait_for_completion(), leading to a hung-task detector warning.
Specifically, it was reported that stop_one_cpu_nowait(.fn = migration_cpu_stop) returns false -- this stopper is responsible for the matching complete().
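For illustration only (not part of the patch; the types below are a deliberately simplified stand-alone model, not the kernel's): once the target CPU's stopper has been parked, the queueing path drops the work and stop_one_cpu_nowait() returns false, so migration_cpu_stop() never runs and the completion that affine_move_task() blocks on is never signalled.

	/* Stand-alone user-space sketch; names only mirror the kernel's. */
	#include <stdbool.h>
	#include <stdio.h>

	struct cpu_stopper {
		bool enabled;		/* cleared by stop_machine_park() on cpu-down */
		bool work_queued;	/* stands in for the kernel's work list */
	};

	/* Models the queueing path: work is dropped once the stopper is parked. */
	static bool stop_one_cpu_nowait(struct cpu_stopper *stopper)
	{
		if (!stopper->enabled)
			return false;	/* nobody will run migration_cpu_stop() */
		stopper->work_queued = true;
		return true;
	}

	int main(void)
	{
		struct cpu_stopper stopper = { .enabled = true };

		/* CPU1 wins the race: take_cpu_down() parks the stopper first ... */
		stopper.enabled = false;

		/* ... and CPU0 only queues migration_cpu_stop() afterwards. */
		if (!stop_one_cpu_nowait(&stopper))
			printf("work dropped: wait_for_completion() would hang\n");

		return 0;
	}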
The race scenario is:
	CPU0					CPU1

					// doing _cpu_down()

  __set_cpus_allowed_ptr()
    task_rq_lock();
					takedown_cpu()
					  stop_machine_cpuslocked(take_cpu_down..)

					<PREEMPT: cpu_stopper_thread()
					  MULTI_STOP_PREPARE
					  ...
    __set_cpus_allowed_ptr_locked()
      affine_move_task()
        task_rq_unlock();
  <PREEMPT: cpu_stopper_thread()>
    ack_state()
					  MULTI_STOP_RUN
					    take_cpu_down()
					      __cpu_disable();
					      stop_machine_park();
					        stopper->enabled = false;
					 />
   />
	stop_one_cpu_nowait(.fn = migration_cpu_stop);
	  if (stopper->enabled) // false!!!
That is, by doing stop_one_cpu_nowait() after dropping rq-lock, the stopper thread gets a chance to preempt and allows the cpu-down for the target CPU to complete.
OTOH, since stop_one_cpu_nowait() / cpu_stop_queue_work() needs to issue a wakeup, it must not be run under the scheduler locks.
Solve this apparent contradiction by keeping preemption disabled over the unlock + queue_stopper combination:
	preempt_disable();
	task_rq_unlock(...);
	if (!stop_pending)
	  stop_one_cpu_nowait(...)
	preempt_enable();
This respects the lock ordering constraints while still avoiding the above race. That is, if we find the CPU is online under rq-lock, the targeted stop_one_cpu_nowait() must succeed.
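For illustration only (again not part of the patch; a stand-alone model with the stop-machine handshake reduced to a single flag): the cpu-down side may only park the target stopper once every CPU has run its stopper thread, and a CPU that keeps preemption disabled across the unlock + queue window never allows that, so the queued work always finds the stopper enabled.

	/* Stand-alone pthread sketch; names only mirror the kernel's. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool preempt_disabled;	/* models preempt_disable() on CPU0 */
	static atomic_bool stopper_enabled;	/* models stopper->enabled of the dying CPU */

	static void *take_cpu_down_thread(void *arg)
	{
		(void)arg;
		/* MULTI_STOP_PREPARE: cannot advance until CPU0 runs its stopper. */
		while (atomic_load(&preempt_disabled))
			;
		atomic_store(&stopper_enabled, false);	/* stop_machine_park() */
		return NULL;
	}

	int main(void)
	{
		pthread_t hotplug;
		bool queued;

		atomic_store(&stopper_enabled, true);
		atomic_store(&preempt_disabled, true);	/* preempt_disable() */
		pthread_create(&hotplug, NULL, take_cpu_down_thread, NULL);

		/* task_rq_unlock() + stop_one_cpu_nowait() inside the window: */
		queued = atomic_load(&stopper_enabled);	/* always true here */

		atomic_store(&preempt_disabled, false);	/* preempt_enable() */
		pthread_join(hotplug, NULL);

		printf("stopper work queued: %s\n", queued ? "yes" : "no");
		return 0;
	}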
Apply this pattern to all similar stop_one_cpu_nowait() invocations.
Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
Reported-by: "Kuyo Chang (張建文)" <Kuyo.Chang@mediatek.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: "Kuyo Chang (張建文)" <Kuyo.Chang@mediatek.com>
Link: https://lkml.kernel.org/r/20231010200442.GA16515@noisy.programming.kicks-ass...
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 kernel/sched/core.c     | 10 ++++++++--
 kernel/sched/deadline.c |  2 ++
 kernel/sched/fair.c     |  4 +++-
 kernel/sched/rt.c       |  4 ++++
 4 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 802551e0009b..9e9a45a3394f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2664,9 +2664,11 @@ static int migration_cpu_stop(void *data)
 		 * it.
 		 */
 		WARN_ON_ONCE(!pending->stop_pending);
+		preempt_disable();
 		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
 				    &pending->arg, &pending->stop_work);
+		preempt_enable();
 		return 0;
 	}
 out:
@@ -2986,12 +2988,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 			complete = true;
 		}
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (push_task) {
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 					    p, &rq->push_work);
 		}
+		preempt_enable();
 
 		if (complete)
 			complete_all(&pending->done);
@@ -3057,12 +3060,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 		if (flags & SCA_MIGRATE_ENABLE)
 			p->migration_flags &= ~MDF_PUSH;
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (!stop_pending) {
 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
 					    &pending->arg, &pending->stop_work);
 		}
+		preempt_enable();
 
 		if (flags & SCA_MIGRATE_ENABLE)
 			return 0;
@@ -9505,9 +9509,11 @@ static void balance_push(struct rq *rq)
 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
 	 * Both preemption and IRQs are still disabled.
 	 */
+	preempt_disable();
 	raw_spin_rq_unlock(rq);
 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
 			    this_cpu_ptr(&push_work));
+	preempt_enable();
 	/*
 	 * At this point need_resched() is true and we'll take the loop in
 	 * schedule(). The next pick is obviously going to be the stop task
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 58b542bf2893..d78f2e8769fb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2449,9 +2449,11 @@ static void pull_dl_task(struct rq *this_rq)
 		double_unlock_balance(this_rq, src_rq);
 
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(this_rq);
 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 					    push_task, &src_rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(this_rq);
 		}
 	}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4dee83c82ac3..1795f6fe991f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11234,13 +11234,15 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 				busiest->push_cpu = this_cpu;
 				active_balance = 1;
 			}
-			raw_spin_rq_unlock_irqrestore(busiest, flags);
 
+			preempt_disable();
+			raw_spin_rq_unlock_irqrestore(busiest, flags);
 			if (active_balance) {
 				stop_one_cpu_nowait(cpu_of(busiest),
 					active_load_balance_cpu_stop, busiest,
 					&busiest->active_balance_work);
 			}
+			preempt_enable();
 		}
 	} else {
 		sd->nr_balance_failed = 0;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0597ba0f85ff..904dd8534597 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2109,9 +2109,11 @@ static int push_rt_task(struct rq *rq, bool pull)
 		 */
 		push_task = get_push_task(rq);
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(rq);
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop, push_task,
 					    &rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(rq);
 		}
 
@@ -2448,9 +2450,11 @@ static void pull_rt_task(struct rq *this_rq)
 		double_unlock_balance(this_rq, src_rq);
 
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(this_rq);
 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 					    push_task, &src_rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(this_rq);
 		}
 	}