From: Cai Xinchen <caixinchen1@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6TI3Y
CVE: NA
--------------------------------
This reverts commit 4924308a1ca9cc2f791398836a8744c22078ffbd.
Signed-off-by: Cai Xinchen <caixinchen1@huawei.com>
Reviewed-by: Wang Weiyang <wangweiyang2@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 kernel/cgroup/cgroup.c | 50 +++++-------------------------------------
 kernel/cgroup/cpuset.c |  7 +++++-
 2 files changed, 11 insertions(+), 46 deletions(-)

diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 6487df9a6be0..b01490b71f32 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -56,7 +56,6 @@
 #include <linux/file.h>
 #include <linux/sched/cputime.h>
 #include <net/sock.h>
-#include <linux/cpu.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/cgroup.h>
@@ -2213,45 +2212,6 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
 
-/**
- * cgroup_attach_lock - Lock for ->attach()
- * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
- *
- * cgroup migration sometimes needs to stabilize threadgroups against forks and
- * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
- * implementations (e.g. cpuset), also need to disable CPU hotplug.
- * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
- * lead to deadlocks.
- *
- * Bringing up a CPU may involve creating and destroying tasks which requires
- * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
- * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
- * write-locking threadgroup_rwsem, the locking order is reversed and we end up
- * waiting for an on-going CPU hotplug operation which in turn is waiting for
- * the threadgroup_rwsem to be released to create new tasks. For more details:
- *
- * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
- *
- * Resolve the situation by always acquiring cpus_read_lock() before optionally
- * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
- * CPU hotplug is disabled on entry.
- */
-static void cgroup_attach_lock(void)
-{
-	cpus_read_lock();
-	percpu_down_write(&cgroup_threadgroup_rwsem);
-}
-
-/**
- * cgroup_attach_unlock - Undo cgroup_attach_lock()
- * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
- */
-static void cgroup_attach_unlock(void)
-{
-	percpu_up_write(&cgroup_threadgroup_rwsem);
-	cpus_read_unlock();
-}
-
 /**
  * cgroup_migrate_add_task - add a migration target task to a migration context
  * @task: target task
@@ -2731,7 +2691,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
 		return ERR_PTR(-EINVAL);
 
-	cgroup_attach_lock();
+	percpu_down_write(&cgroup_threadgroup_rwsem);
 
 	rcu_read_lock();
 	if (pid) {
@@ -2762,7 +2722,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
 		goto out_unlock_rcu;
 
 out_unlock_threadgroup:
-	cgroup_attach_unlock();
+	percpu_up_write(&cgroup_threadgroup_rwsem);
 out_unlock_rcu:
 	rcu_read_unlock();
 	return tsk;
@@ -2777,7 +2737,7 @@ void cgroup_procs_write_finish(struct task_struct *task)
 	/* release reference from cgroup_procs_write_start() */
 	put_task_struct(task);
 
-	cgroup_attach_unlock();
+	percpu_up_write(&cgroup_threadgroup_rwsem);
 	for_each_subsys(ss, ssid)
 		if (ss->post_attach)
 			ss->post_attach();
@@ -2858,7 +2818,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 
 	lockdep_assert_held(&cgroup_mutex);
 
-	cgroup_attach_lock();
+	percpu_down_write(&cgroup_threadgroup_rwsem);
 
 	/* look up all csses currently attached to @cgrp's subtree */
 	spin_lock_irq(&css_set_lock);
@@ -2888,7 +2848,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 	ret = cgroup_migrate_execute(&mgctx);
 out_finish:
 	cgroup_migrate_finish(&mgctx);
-	cgroup_attach_unlock();
+	percpu_up_write(&cgroup_threadgroup_rwsem);
 	return ret;
 }
 
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 1d13d64108a0..def36c3fc524 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1612,7 +1612,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 	cgroup_taskset_first(tset, &css);
 	cs = css_cs(css);
 
-	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
+	/*
+	 * It should hold cpus lock because a cpu offline event can
+	 * cause set_cpus_allowed_ptr() failed.
+	 */
+	get_online_cpus();
 	mutex_lock(&cpuset_mutex);
 
 	/* prepare for attach */
@@ -1675,6 +1679,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 		wake_up(&cpuset_attach_wq);
 
 	mutex_unlock(&cpuset_mutex);
+	put_online_cpus();
 }
 
 /* The various types of files and directories in a cpuset file system */
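
Not part of the patch: for reviewers, a minimal userspace sketch of the ordering rule described in the reverted cgroup_attach_lock() comment -- take the CPU-hotplug lock (read side) before write-locking the threadgroup rwsem, so the two locks are never acquired in opposite orders. The pthread rwlocks and the names hotplug_lock, threadgroup_rwsem, attach_lock/attach_unlock are illustrative stand-ins, not kernel APIs.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for cpu_hotplug_lock and cgroup_threadgroup_rwsem. */
static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t threadgroup_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/*
 * Same ordering rule as the reverted helper: hotplug (read) first, then
 * threadgroup (write), so an attacher and a hotplug writer can never end up
 * waiting on each other with the locks taken in opposite orders.
 */
static void attach_lock(void)
{
	pthread_rwlock_rdlock(&hotplug_lock);
	pthread_rwlock_wrlock(&threadgroup_rwsem);
}

static void attach_unlock(void)
{
	pthread_rwlock_unlock(&threadgroup_rwsem);
	pthread_rwlock_unlock(&hotplug_lock);
}

int main(void)
{
	attach_lock();
	printf("attach section: hotplug held off, threadgroup stable\n");
	attach_unlock();
	return 0;
}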