On 2023/12/7 11:14, Wenyu Huang wrote:
From: Zhang Qiao <zhangqiao22@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8MF4R
CVE: NA
When online tasks occupy the CPU for a long time, offline tasks never get the
CPU to run, and a priority inversion issue may be triggered in this case. If
that happens, we unthrottle the offline tasks so that they get a chance to
run. Concretely, when online tasks have occupied the CPU for more than 5s
(the default value), we unthrottle the offline tasks, and each offline task
then enters an msleep loop before exiting to user mode, until the CPU goes
idle.
Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Wenyu Huang <huangwenyu5@huawei.com>
---
 include/linux/resume_user_mode.h |  5 ++
 include/linux/sched.h            |  7 +++
 include/linux/sched/sysctl.h     |  5 ++
 kernel/sched/core.c              |  3 +
 kernel/sched/fair.c              | 99 +++++++++++++++++++++++++++++++-
 kernel/sched/sched.h             |  4 ++
 kernel/sysctl.c                  | 24 ++++++++
 7 files changed, 146 insertions(+), 1 deletion(-)
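Summarizing the control flow for other reviewers (this is my reading of the
diff below; step 1 is inferred, since the start_qos_hrtimer() call site is
not in the quoted hunks):

/*
 * 1. start_qos_hrtimer(cpu) presumably runs when offline tasks get
 *    throttled, arming a one-shot per-cpu timer of
 *    sysctl_overload_detect_period ms (call site not visible here).
 * 2. If online tasks still hold the cpu when the timer fires,
 *    qos_overload_timer_handler() unthrottles the offline cfs_rqs via
 *    __unthrottle_qos_cfs_rqs() and sets the per-cpu qos_cpu_overload
 *    flag.
 * 3. pick_next_task_fair() -> qos_schedule_throttle() calls
 *    set_notify_resume() on picked offline tasks, so on the way back to
 *    user mode resume_user_mode_work() parks them in
 *    sched_qos_offline_wait(), sleeping sysctl_offline_wait_interval ms
 *    per round.
 * 4. When the cpu finally goes idle, pick_next_task_fair() clears
 *    qos_cpu_overload and unthrottle_qos_cfs_rqs() cancels the pending
 *    timer, letting the waiters return to user mode.
 */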
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
index f8f3e958e9cf..255372856812 100644
--- a/include/linux/resume_user_mode.h
+++ b/include/linux/resume_user_mode.h
@@ -59,6 +59,11 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
 	blkcg_maybe_throttle_current();
 
 	rseq_handle_notify_resume(NULL, regs);
+
+#ifdef CONFIG_QOS_SCHED
+	sched_qos_offline_wait();
+#endif
 }
 
 #endif /* LINUX_RESUME_USER_MODE_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d796894806f4..ab69ae3808bb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2459,6 +2459,13 @@ extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 
 #ifdef CONFIG_QOS_SCHED
 void sched_move_offline_task(struct task_struct *p);
+void sched_qos_offline_wait(void);
+int sched_qos_cpu_overload(void);
+#else
+static inline int sched_qos_cpu_overload(void)
+{
+	return 0;
+}
 #endif
 
 #endif
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 5a64582b086b..bc42a5f69e16 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -29,4 +29,9 @@ extern int sysctl_numa_balancing_mode;
 #define sysctl_numa_balancing_mode	0
 #endif
 
+#ifdef CONFIG_QOS_SCHED
+extern unsigned int sysctl_overload_detect_period;
+extern unsigned int sysctl_offline_wait_interval;
+#endif
+
 #endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ac3de8f7e27d..bff68b26d2e5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10018,6 +10018,9 @@ void __init sched_init(void)
 		 * We achieve this by letting root_task_group's tasks sit
 		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
 		 */
+#ifdef CONFIG_QOS_SCHED
+		init_qos_hrtimer(i);
+#endif
 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b42262c3dd0d..c544b8ac2812 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -57,6 +57,11 @@
 #include "stats.h"
 #include "autogroup.h"
 
+#ifdef CONFIG_QOS_SCHED
+#include <linux/delay.h>
+#include <linux/resume_user_mode.h>
+#endif
+
 /*
  * The initial- and re-scaling of tunables is configurable
@@ -133,6 +138,10 @@ int __weak arch_asym_cpu_priority(int cpu)
 #define QOS_THROTTLED	2
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct list_head, qos_throttled_cfs_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct hrtimer, qos_overload_timer);
+static DEFINE_PER_CPU(int, qos_cpu_overload);
+unsigned int sysctl_overload_detect_period = 5000;  /* in ms */
+unsigned int sysctl_offline_wait_interval = 100;  /* in ms */
 static int unthrottle_qos_cfs_rqs(int cpu);
 #endif
@@ -8303,7 +8312,7 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
 	resched_curr(rq);
 }
 
-static int unthrottle_qos_cfs_rqs(int cpu)
+static int __unthrottle_qos_cfs_rqs(int cpu)
 {
 	struct cfs_rq *cfs_rq, *tmp_rq;
 	int res = 0;
@@ -8319,8 +8328,22 @@ static int unthrottle_qos_cfs_rqs(int cpu)
 	return res;
 }
 
+static int unthrottle_qos_cfs_rqs(int cpu)
+{
+	int res;
+
+	res = __unthrottle_qos_cfs_rqs(cpu);
+	if (res)
+		hrtimer_cancel(&(per_cpu(qos_overload_timer, cpu)));
+
+	return res;
+}
+
 static bool check_qos_cfs_rq(struct cfs_rq *cfs_rq)
 {
+	if (unlikely(__this_cpu_read(qos_cpu_overload)))
+		return false;
+
 	if (unlikely(cfs_rq && cfs_rq->tg->qos_level < 0 &&
 		     !sched_idle_cpu(smp_processor_id()) &&
 		     cfs_rq->h_nr_running == cfs_rq->idle_h_nr_running)) {
@@ -8345,6 +8368,74 @@ static inline void unthrottle_qos_sched_group(struct cfs_rq *cfs_rq)
 	unthrottle_qos_cfs_rq(cfs_rq);
 	rq_unlock_irqrestore(rq, &rf);
 }
+
+void sched_qos_offline_wait(void)
+{
+	long qos_level;
+
+	while (unlikely(this_cpu_read(qos_cpu_overload))) {
+		rcu_read_lock();
+		qos_level = task_group(current)->qos_level;
+		rcu_read_unlock();
+		if (qos_level != -1 || fatal_signal_pending(current))
+			break;
+
+		schedule_timeout_killable(msecs_to_jiffies(sysctl_offline_wait_interval));
+	}
+}
+
+int sched_qos_cpu_overload(void)
+{
+	return __this_cpu_read(qos_cpu_overload);
+}
+
+static enum hrtimer_restart qos_overload_timer_handler(struct hrtimer *timer)
+{
+	struct rq_flags rf;
+	struct rq *rq = this_rq();
+
+	rq_lock_irqsave(rq, &rf);
+	if (__unthrottle_qos_cfs_rqs(smp_processor_id()))
+		__this_cpu_write(qos_cpu_overload, 1);
+	rq_unlock_irqrestore(rq, &rf);
+
+	return HRTIMER_NORESTART;
+}
+
+static void start_qos_hrtimer(int cpu)
+{
+	ktime_t time;
+	struct hrtimer *hrtimer = &(per_cpu(qos_overload_timer, cpu));
+
+	time = ktime_add_ms(hrtimer->base->get_time(), (u64)sysctl_overload_detect_period);
+	hrtimer_set_expires(hrtimer, time);
+	hrtimer_start_expires(hrtimer, HRTIMER_MODE_ABS_PINNED);
+}
+
+void init_qos_hrtimer(int cpu)
+{
+	struct hrtimer *hrtimer = &(per_cpu(qos_overload_timer, cpu));
+
+	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+	hrtimer->function = qos_overload_timer_handler;
+}
+
+/*
+ * To avoid priority inversion issues, when this cpu is qos_cpu_overload,
+ * we should schedule offline tasks to run so that they can leave kernel
+ * critical sections, and throttle them before returning to user mode.
+ */
+static void qos_schedule_throttle(struct task_struct *p)
+{
+	if (unlikely(current->flags & PF_KTHREAD))
+		return;
+
+	if (unlikely(this_cpu_read(qos_cpu_overload))) {
+		if (is_offline_task(p))
+			set_notify_resume(p);
+	}
+}
 #endif
 
 #ifdef CONFIG_SMP
@@ -8507,6 +8598,10 @@ done: __maybe_unused;
 	update_misfit_status(p, rq);
 	sched_fair_update_stop_tick(rq, p);
 
+#ifdef CONFIG_QOS_SCHED
+	qos_schedule_throttle(p);
+#endif
+
 	return p;
 
 idle:
@@ -8531,6 +8626,8 @@ done: __maybe_unused;
 		rq->idle_stamp = 0;
 		goto again;
 	}
+
+	__this_cpu_write(qos_cpu_overload, 0);
 #endif
 
 	/*
 	 * rq is about to be idle, check if we need to update the
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3d5ce9a01068..3de84e95baf1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1411,6 +1411,10 @@ do {						\
 	flags = _raw_spin_rq_lock_irqsave(rq);	\
 } while (0)
 
+#ifdef CONFIG_QOS_SCHED
+void init_qos_hrtimer(int cpu);
+#endif
+
 #ifdef CONFIG_SCHED_SMT
 extern void __update_idle_core(struct rq *rq);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 354a2d294f52..4d100e8e9046 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -96,6 +96,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
 static const int six_hundred_forty_kb = 640 * 1024;
 #endif
 
+#ifdef CONFIG_QOS_SCHED
+static int one_thousand = 1000;
+static int hundred_thousand = 100000;
+#endif
 
 static const int ngroups_max = NGROUPS_MAX;
 static const int cap_last_cap = CAP_LAST_CAP;
 
@@ -2033,6 +2037,26 @@ static struct ctl_table kern_table[] = {
 		.extra1		= SYSCTL_ZERO,
 		.extra2		= SYSCTL_ONE,
 	},
+#ifdef CONFIG_QOS_SCHED
+	{
+		.procname	= "qos_overload_detect_period_ms",
+		.data		= &sysctl_overload_detect_period,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ONE_HUNDRED,
+		.extra2		= &hundred_thousand,
+	},
+	{
+		.procname	= "qos_offline_wait_interval_ms",
+		.data		= &sysctl_offline_wait_interval,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ONE_HUNDRED,
+		.extra2		= &one_thousand,
+	},
+#endif
We don't recommend putting these in sysctl; they can live in your own code.
The community is also continuously cleaning these sysctls out of
kernel/sysctl.c.
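For reference, a minimal sketch following the in-tree register_sysctl_init()
pattern, assuming that helper is available on this branch. The table and
init-function names (sched_qos_sysctls, sched_qos_sysctl_init) are made up
for illustration; the two limit variables would move along with the table,
e.g. into kernel/sched/fair.c next to the knobs themselves:

#ifdef CONFIG_SYSCTL
static int one_thousand = 1000;
static int hundred_thousand = 100000;

/* same /proc/sys/kernel/ paths and limits as the kern_table entries above */
static struct ctl_table sched_qos_sysctls[] = {
	{
		.procname	= "qos_overload_detect_period_ms",
		.data		= &sysctl_overload_detect_period,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE_HUNDRED,
		.extra2		= &hundred_thousand,
	},
	{
		.procname	= "qos_offline_wait_interval_ms",
		.data		= &sysctl_offline_wait_interval,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE_HUNDRED,
		.extra2		= &one_thousand,
	},
	{}	/* terminator, if this branch still requires one */
};

static int __init sched_qos_sysctl_init(void)
{
	/* registers the table under /proc/sys/kernel/ at late init */
	register_sysctl_init("kernel", sched_qos_sysctls);
	return 0;
}
late_initcall(sched_qos_sysctl_init);
#endif /* CONFIG_SYSCTL */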
 	{
 		.procname	= "max_rcu_stall_to_panic",
 		.data		= &sysctl_max_rcu_stall_to_panic,