From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
mainline inclusion
from mainline-v5.7-rc1
commit b4fb015eeff7f3e5518a7dbe8061169a3e2f2bc7
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6DZRO
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
The group RT scheduler contains protection against setting a zero runtime for a cgroup that has RT tasks. Right now, tg_set_rt_bandwidth() iterates over all CPU cgroups and calls tg_has_rt_tasks() for every cgroup whose runtime is zero (not only for the changed one). Since the default RT runtime is zero, tg_has_rt_tasks() ends up being called for almost all CPU cgroups.
This protection is already slightly racy: the runtime limit can be changed between cpu_cgroup_can_attach() and cpu_cgroup_attach(), because changing a cgroup attribute does not take cgroup_mutex, while attach does not take rt_constraints_mutex. Changing a task's scheduler class also races with changing the RT runtime: the check in __sched_setscheduler() is not protected.
Function tg_has_rt_tasks() iterates over all threads in the system. This gives NR_CGROUPS * NR_TASKS operations under a single tasklist_lock held for read in tg_set_rt_bandwidth(). Any concurrent attempt to take tasklist_lock for write (for example, fork) will get stuck with interrupts disabled.
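
To make the cost concrete, this is the pre-patch helper, consolidated from the removed lines in the diff below (a view of existing code, not an extra change): every call walks every thread in the system and must run under tasklist_lock.

	/* Must be called with tasklist_lock held */
	static inline int tg_has_rt_tasks(struct task_group *tg)
	{
		struct task_struct *g, *p;

		if (task_group_is_autogroup(tg))
			return 0;

		/* One full pass over every thread in the system per cgroup checked. */
		for_each_process_thread(g, p) {
			if (rt_task(p) && task_group(p) == tg)
				return 1;
		}

		return 0;
	}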
This patch makes two optimizations:
1) Remove the tasklist_lock locking and iterate only over the tasks in the cgroup
2) Call tg_has_rt_tasks() only if the RT runtime changes from non-zero to zero
All changed code is under CONFIG_RT_GROUP_SCHED.
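
For readability, here is the rewritten helper as it reads with the hunks below applied (a consolidated view of the same code, nothing beyond the diff):

	static inline int tg_has_rt_tasks(struct task_group *tg)
	{
		struct task_struct *task;
		struct css_task_iter it;
		int ret = 0;

		/*
		 * Autogroups do not have RT tasks; see autogroup_create().
		 */
		if (task_group_is_autogroup(tg))
			return 0;

		/* Walk only this cgroup's tasks and stop at the first RT task. */
		css_task_iter_start(&tg->css, 0, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret |= rt_task(task);
		css_task_iter_end(&it);

		return ret;
	}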
Testcase:
 # mkdir /sys/fs/cgroup/cpu/test{1..10000}
 # echo 0 | tee /sys/fs/cgroup/cpu/test*/cpu.rt_runtime_us
At the same time, without the patch, fork time will exceed 100ms:

 # perf trace -e clone --duration 100 stress-ng --fork 1
Remote ping will also show timings above 100ms, caused by the IRQ latency.
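
Likewise, the schedulability check in tg_rt_schedulable(), consolidated from the diff below; the added tg->rt_bandwidth.rt_runtime test short-circuits the task walk unless a non-zero runtime is actually being set to zero:

	/*
	 * Ensure we don't starve existing RT tasks if runtime turns zero.
	 */
	if (rt_bandwidth_enabled() && !runtime &&
	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
		return -EBUSY;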
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/157996383820.4651.11292439232549211693.stgit@buzz
Signed-off-by: Zhao Wenhui <zhaowenhui8@huawei.com>
Reviewed-by: chenhui <judy.chenhui@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 kernel/sched/rt.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a500bd78199b..4d6f1bc86b1b 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2423,10 +2423,11 @@ const struct sched_class rt_sched_class = {
  */
 static DEFINE_MUTEX(rt_constraints_mutex);
 
-/* Must be called with tasklist_lock held */
 static inline int tg_has_rt_tasks(struct task_group *tg)
 {
-	struct task_struct *g, *p;
+	struct task_struct *task;
+	struct css_task_iter it;
+	int ret = 0;
 
 	/*
 	 * Autogroups do not have RT tasks; see autogroup_create().
@@ -2434,12 +2435,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 	if (task_group_is_autogroup(tg))
 		return 0;
 
-	for_each_process_thread(g, p) {
-		if (rt_task(p) && task_group(p) == tg)
-			return 1;
-	}
+	css_task_iter_start(&tg->css, 0, &it);
+	while (!ret && (task = css_task_iter_next(&it)))
+		ret |= rt_task(task);
+	css_task_iter_end(&it);
 
-	return 0;
+	return ret;
 }
 
 struct rt_schedulable_data {
@@ -2470,9 +2471,10 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
 		return -EINVAL;
 
 	/*
-	 * Ensure we don't starve existing RT tasks.
+	 * Ensure we don't starve existing RT tasks if runtime turns zero.
 	 */
-	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
+	if (rt_bandwidth_enabled() && !runtime &&
+	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
 		return -EBUSY;
 
 	total = to_ratio(period, runtime);
@@ -2538,7 +2540,6 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 		return -EINVAL;
 
 	mutex_lock(&rt_constraints_mutex);
-	read_lock(&tasklist_lock);
 	err = __rt_schedulable(tg, rt_period, rt_runtime);
 	if (err)
 		goto unlock;
@@ -2556,7 +2557,6 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 	}
 	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
 unlock:
-	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
 
 	return err;
@@ -2615,9 +2615,7 @@ static int sched_rt_global_constraints(void)
 	int ret = 0;
 
 	mutex_lock(&rt_constraints_mutex);
-	read_lock(&tasklist_lock);
 	ret = __rt_schedulable(NULL, 0, 0);
-	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
 
 	return ret;