hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9RZC8
--------------------------------
When using cgroup rt_bandwidth with RT_RUNTIME_SHARE, if a cpu hotplug
operation races with a change to cpu.rt_runtime_us, the warning in
__disable_runtime() can trigger:

[ 991.697692] WARNING: CPU: 0 PID: 49573 at kernel/sched/rt.c:802 rq_offline_rt+0x24d/0x260
[ 991.697795] CPU: 0 PID: 49573 Comm: kworker/1:0 Kdump: loaded Not tainted 6.9.0-rc1+ #4
[ 991.697800] Workqueue: events cpuset_hotplug_workfn
[ 991.697803] RIP: 0010:rq_offline_rt+0x24d/0x260
[ 991.697825] Call Trace:
[ 991.697827]  <TASK>
[ 991.697858]  set_rq_offline.part.125+0x2d/0x70
[ 991.697864]  rq_attach_root+0xda/0x110
[ 991.697867]  cpu_attach_domain+0x433/0x860
[ 991.697880]  partition_sched_domains_locked+0x2a8/0x3a0
[ 991.697885]  rebuild_sched_domains_locked+0x608/0x800
[ 991.697895]  rebuild_sched_domains+0x1b/0x30
[ 991.697897]  cpuset_hotplug_workfn+0x4b6/0x1160
[ 991.697909]  process_scheduled_works+0xad/0x430
[ 991.697917]  worker_thread+0x105/0x270
[ 991.697922]  kthread+0xde/0x110
[ 991.697928]  ret_from_fork+0x2d/0x50
[ 991.697935]  ret_from_fork_asm+0x11/0x20
[ 991.697940]  </TASK>
[ 991.697941] ---[ end trace 0000000000000000 ]---
That's how it happens:

        CPU0                            CPU1
        -----                           -----
  set_rq_offline(rq)
    __disable_runtime(rq)     (1)
                                  tg_set_rt_bandwidth   (2)
                                  do_balance_runtime    (3)
  set_rq_online(rq)
    __enable_runtime(rq)      (4)
In step (1) rt_rq->rt_runtime is set to RUNTIME_INF, and this rt_rq's runtime is not supposed to change again until its rq goes back online. However, in step (2) tg_set_rt_bandwidth can still overwrite rt_rq->rt_runtime with rt_bandwidth.rt_runtime. As a result, in step (3) the rt_rq's runtime is no longer RUNTIME_INF, so other runqueues can borrow rt_runtime from it. Finally, in step (4) the rq goes online and its rt_rq's runtime is set to rt_bandwidth.rt_runtime once more, so the total rt_runtime in the root domain ends up larger than configured. After that, when a cpu is taken offline and the sched domains are rebuilt, every rq is set offline, and the last rq finds more rt_runtime than it can give back, which fires the warning above.
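For context, the window comes from code that looks roughly like the trimmed excerpts below (paraphrased from mainline kernel/sched/rt.c, so treat them as illustrative rather than exact): __disable_runtime() parks the rt_rq at RUNTIME_INF, while tg_set_rt_bandwidth() unconditionally pushes the new group bandwidth to every per-cpu rt_rq, which is what reopens the borrowing path in steps (2) and (3):

    /* __disable_runtime(), trimmed: pretend we have inf runtime so that
     * nobody balances runtime against this rt_rq while the rq is offline.
     */
    rt_rq->rt_runtime = RUNTIME_INF;
    rt_rq->rt_throttled = 0;

    /* tg_set_rt_bandwidth(), trimmed: overwrites every per-cpu rt_rq,
     * including one that __disable_runtime() has just parked.
     */
    for_each_possible_cpu(i) {
            struct rt_rq *rt_rq = tg->rt_rq[i];

            raw_spin_lock(&rt_rq->rt_runtime_lock);
            rt_rq->rt_runtime = rt_runtime;
            raw_spin_unlock(&rt_rq->rt_runtime_lock);
    }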
To fix this, add a RUNTIME_DISABLED state that marks a runtime as disabled and not to be used. When the rq goes offline, set its rt_rq's rt_runtime to RUNTIME_DISABLED, and reset it when the rq comes back online. In tg_set_rt_bandwidth() and do_balance_runtime(), never touch a disabled rt_runtime.
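A minimal sketch of that approach, for illustration only: the concrete RUNTIME_DISABLED value and the exact placement of the hunks are assumptions here (most of the real changes are carried in the .rej files below); only the do_balance_runtime() check is taken from the applied part of this patch.

    /* Assumed sentinel, distinct from RUNTIME_INF; the real definition may differ. */
    #define RUNTIME_DISABLED	((u64)~1ULL)

    /* __disable_runtime(): park the rt_rq until its rq goes online again. */
    rt_rq->rt_runtime = RUNTIME_DISABLED;

    /* __enable_runtime(): rq is online again, restore the group bandwidth. */
    rt_rq->rt_runtime = rt_b->rt_runtime;

    /* tg_set_rt_bandwidth(): never touch a parked rt_rq. */
    raw_spin_lock(&rt_rq->rt_runtime_lock);
    if (rt_rq->rt_runtime != RUNTIME_DISABLED)
            rt_rq->rt_runtime = rt_runtime;
    raw_spin_unlock(&rt_rq->rt_runtime_lock);

    /* do_balance_runtime(): never borrow from a disabled rt_rq (from this patch). */
    if (iter->rt_runtime == RUNTIME_INF ||
        iter->rt_runtime == RUNTIME_DISABLED)
            goto next;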
Fixes: 7def2be1dc67 ("sched: fix hotplug cpus on ia64")
Closes: https://lore.kernel.org/all/47b4a790-9a27-2fc5-f2aa-f9981c6da015@huawei.com/
Co-developed-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Zhao Wenhui <zhaowenhui8@huawei.com>
---
 kernel/sched/fair.c.rej | 52 +++++++++++++++++++++++++++++++++++++++++
 kernel/sched/rt.c       |  3 ++-
 kernel/sched/rt.c.rej   | 11 +++++++++
 3 files changed, 65 insertions(+), 1 deletion(-)
 create mode 100644 kernel/sched/fair.c.rej
 create mode 100644 kernel/sched/rt.c.rej
diff --git a/kernel/sched/fair.c.rej b/kernel/sched/fair.c.rej
new file mode 100644
index 000000000000..7275501f1bf3
--- /dev/null
+++ b/kernel/sched/fair.c.rej
@@ -0,0 +1,52 @@
+diff a/kernel/sched/fair.c b/kernel/sched/fair.c	(rejected hunks)
+@@ -161,9 +161,12 @@ int __weak arch_asym_cpu_priority(int cpu)
+ #endif
+ 
+ #ifdef CONFIG_QOS_SCHED
+-
++struct qos_overload_checker {
++	struct hrtimer qos_overload_timer;
++	unsigned int cpu;
++};
+ static DEFINE_PER_CPU_SHARED_ALIGNED(struct list_head, qos_throttled_cfs_rq);
+-static DEFINE_PER_CPU_SHARED_ALIGNED(struct hrtimer, qos_overload_timer);
++static DEFINE_PER_CPU_SHARED_ALIGNED(struct qos_overload_checker, qos_overload_checker);
+ static DEFINE_PER_CPU(int, qos_cpu_overload);
+ unsigned int sysctl_overload_detect_period = 5000; /* in ms */
+ unsigned int sysctl_offline_wait_interval = 100; /* in ms */
+@@ -7951,14 +7954,17 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+ }
+ 
+ #ifdef CONFIG_QOS_SCHED
++static struct hrtimer *qos_timer(int cpu) {
++	return &(per_cpu_ptr(&qos_overload_checker, cpu)->qos_overload_timer);
++}
+ static inline bool qos_timer_is_activated(int cpu)
+ {
+-	return hrtimer_active(per_cpu_ptr(&qos_overload_timer, cpu));
++	return hrtimer_active(qos_timer(cpu));
+ }
+ 
+ static inline void cancel_qos_timer(int cpu)
+ {
+-	hrtimer_cancel(per_cpu_ptr(&qos_overload_timer, cpu));
++	hrtimer_cancel(qos_timer(cpu));
+ }
+ 
+ static inline bool is_offline_task(struct task_struct *p)
+@@ -8183,10 +8189,13 @@ void sched_qos_offline_wait(void)
+ static enum hrtimer_restart qos_overload_timer_handler(struct hrtimer *timer)
+ {
+ 	struct rq_flags rf;
+-	struct rq *rq = this_rq();
++	struct qos_overload_checker *checker = container_of(timer,
++			struct qos_overload_checker, qos_overload_timer);
++	int cpu = checker->cpu;
++	struct rq *rq = cpu_rq(cpu);
+ 
+ 	rq_lock_irqsave(rq, &rf);
+-	__unthrottle_qos_cfs_rqs(smp_processor_id());
++	__unthrottle_qos_cfs_rqs(cpu);
+ 	__this_cpu_write(qos_cpu_overload, 1);
+ 
+ 	/* Determine whether we need to wake up potentially idle CPU. */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 58364f489529..bb1f76264686 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -664,7 +664,8 @@ static void do_balance_runtime(struct rt_rq *rt_rq)
 		 * or __disable_runtime() below sets a specific rq to inf to
 		 * indicate its been disabled and disalow stealing.
 		 */
-		if (iter->rt_runtime == RUNTIME_INF)
+		if (iter->rt_runtime == RUNTIME_INF ||
+		    iter->rt_runtime == RUNTIME_DISABLED)
 			goto next;
 
 		/*
diff --git a/kernel/sched/rt.c.rej b/kernel/sched/rt.c.rej
new file mode 100644
index 000000000000..710818859059
--- /dev/null
+++ b/kernel/sched/rt.c.rej
@@ -0,0 +1,11 @@
+diff a/kernel/sched/rt.c b/kernel/sched/rt.c	(rejected hunks)
+@@ -704,7 +704,8 @@ static void do_balance_runtime(struct rt_rq *rt_rq)
+ 		 * or __disable_runtime() below sets a specific rq to inf to
+ 		 * indicate its been disabled and disallow stealing.
+ 		 */
+-		if (iter->rt_runtime == RUNTIME_INF)
++		if (iter->rt_runtime == RUNTIME_INF ||
++		    iter->rt_runtime == RUNTIME_DISABLED)
+ 			goto next;
+ 
+ 		/*