hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID8IIO ----------------------------------------- xcu_move_task() previously performed dequeue/attach/enqueue operations while holding old_xcg->lock (a spinlock), and then acquired xcu->xcu_lock (a mutex) inside that critical section. Acquiring a mutex may sleep, which is not allowed while holding a spinlock (atomic context), so this could trigger a sleeping-in-atomic bug. This patch narrows the scope of old_xcg->lock to only protect the list operation that removes the xse from old_xcg->members. The lock is then released before enqueue/dequeue operations, which are handled under xcu->xcu_lock instead. Fixes: 43bbefc53356 ("xsched: Add XCU control group implementation and its backend in xsched CFS") Signed-off-by: Zicheng Qu <quzicheng@huawei.com> --- kernel/xsched/cgroup.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/xsched/cgroup.c b/kernel/xsched/cgroup.c index 835508325536..eabdb3f987dd 100644 --- a/kernel/xsched/cgroup.c +++ b/kernel/xsched/cgroup.c @@ -424,21 +424,24 @@ void xcu_move_task(struct task_struct *task, struct xsched_group *old_xcg, struct xsched_cu *xcu; spin_lock(&old_xcg->lock); + list_for_each_entry_safe(xse, tmp, &old_xcg->members, group_node) { if (xse->owner_pid != task_pid_nr(task)) continue; - xcu = xse->xcu; - if (old_xcg != xse->parent_grp) { WARN_ON(old_xcg != xse->parent_grp); spin_unlock(&old_xcg->lock); return; } + xcu = xse->xcu; + /* delete from the old_xcg */ list_del(&xse->group_node); + spin_unlock(&old_xcg->lock); + mutex_lock(&xcu->xcu_lock); /* dequeue from the current runqueue */ dequeue_ctx(xse, xcu); @@ -447,7 +450,10 @@ void xcu_move_task(struct task_struct *task, struct xsched_group *old_xcg, /* enqueue to the runqueue in new_xcg */ enqueue_ctx(xse, xcu); mutex_unlock(&xcu->xcu_lock); + + return; } + spin_unlock(&old_xcg->lock); } -- 2.34.1