From: tanghui <tanghui20@huawei.com>
hulk inclusion
category: feature
bugzilla: 186575, https://gitee.com/openeuler/kernel/issues/I526XC
--------------------------------

Add two schedstat counters for scheduler dynamic affinity:
nr_wakeups_preferred_cpus counts wakeups for which cpu selection was
narrowed to the task's preferred cpus, and
nr_wakeups_force_preferred_cpus counts wakeups for which the selected
cpu fell outside that set and was forced back into it. Both counters
are exported through /proc/<pid>/sched.
Signed-off-by: tanghui <tanghui20@huawei.com>
Signed-off-by: Zheng Zucheng <zhengzucheng@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
---
 include/linux/sched.h |  6 ++++++
 kernel/sched/debug.c  |  4 ++++
 kernel/sched/fair.c   | 11 +++++++++--
 3 files changed, 19 insertions(+), 2 deletions(-)
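The counters are exported via the P_SCHEDSTAT() lines added to debug.c
below and only tick while schedstats is enabled. A minimal userspace
sketch for reading them back; the program is illustrative and not part
of this patch, only the two field names come from it:

	#include <stdio.h>
	#include <string.h>

	int main(int argc, char **argv)
	{
		char path[64], line[256];
		FILE *fp;

		/* Default to the current task if no pid is given. */
		snprintf(path, sizeof(path), "/proc/%s/sched",
			 argc > 1 ? argv[1] : "self");
		fp = fopen(path, "r");
		if (!fp) {
			perror(path);
			return 1;
		}
		while (fgets(line, sizeof(line), fp)) {
			/* Match both counters added by this patch. */
			if (strstr(line, "nr_wakeups_preferred_cpus") ||
			    strstr(line, "nr_wakeups_force_preferred_cpus"))
				fputs(line, stdout);
		}
		fclose(fp);
		return 0;
	}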
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e91f6ea282a3..6abf68d8f1b8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -468,9 +468,15 @@ struct sched_statistics {
 	KABI_RESERVE(1)
 	KABI_RESERVE(2)
 #endif
+
+#if defined(CONFIG_QOS_SCHED_DYNAMIC_AFFINITY) && !defined(__GENKSYMS__)
+	u64			nr_wakeups_preferred_cpus;
+	u64			nr_wakeups_force_preferred_cpus;
+#else
 	KABI_RESERVE(3)
 	KABI_RESERVE(4)
 #endif
+#endif
 };
 
 struct sched_entity {
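The hunk above swaps the last two KABI reserve slots for the new
counters. A minimal sketch of that idiom (the exact expansion of
KABI_RESERVE is assumed here, and all names are illustrative):
reserving fixed-size slots up front lets a later patch introduce real
fields without changing the structure layout, while hiding the change
from genksyms keeps the exported symbol CRCs stable:

	/* Illustrative only: assumed shape of the kABI slot-reuse idiom. */
	struct example_stats {
	#if defined(CONFIG_NEW_FEATURE) && !defined(__GENKSYMS__)
		u64	new_counter_a;	/* occupies old reserve slot 3 */
		u64	new_counter_b;	/* occupies old reserve slot 4 */
	#else
		u64	kabi_reserve3;	/* roughly what KABI_RESERVE(3) expands to */
		u64	kabi_reserve4;
	#endif
	};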
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index dda6e77accc2..00f01518bbdd 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -977,6 +977,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 		P_SCHEDSTAT(se.statistics.nr_qos_smt_expelled);
 #endif
 
+#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
+	P_SCHEDSTAT(se.statistics.nr_wakeups_preferred_cpus);
+	P_SCHEDSTAT(se.statistics.nr_wakeups_force_preferred_cpus);
+#endif
 	avg_atom = p->se.sum_exec_runtime;
 	if (nr_switches)
 		avg_atom = div64_ul(avg_atom, nr_switches);

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 49e2886c4c6b..c2c1f8f6c12d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7009,6 +7009,8 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
 		if (available_idle_cpu(cpu)) {
 			rcu_read_unlock();
 			p->select_cpus = p->prefer_cpus;
+			if (sd_flag & SD_BALANCE_WAKE)
+				schedstat_inc(p->se.statistics.nr_wakeups_preferred_cpus);
 			return;
 		}
 
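Context for the sd_flag check above, as a reference sketch (the three
flag values below are standard sched-domain balance types, not
introduced by this patch):

	/*
	 * select_task_rq_fair() is entered for three placement types,
	 * only one of which is a wakeup, so the increment is gated to
	 * preserve the nr_wakeups_* meaning of the counter:
	 *
	 *   SD_BALANCE_EXEC - placement on exec
	 *   SD_BALANCE_FORK - placement on fork
	 *   SD_BALANCE_WAKE - placement on wakeup  <-- counted here
	 */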
@@ -7018,8 +7020,11 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
 	rcu_read_unlock();
 
 	if (tg_capacity > cpumask_weight(p->prefer_cpus) &&
-	    util_avg_sum * 100 <= tg_capacity * sysctl_sched_util_low_pct)
+	    util_avg_sum * 100 <= tg_capacity * sysctl_sched_util_low_pct) {
 		p->select_cpus = p->prefer_cpus;
+		if (sd_flag & SD_BALANCE_WAKE)
+			schedstat_inc(p->se.statistics.nr_wakeups_preferred_cpus);
+	}
 }
 #endif
 
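For scale on the utilization gate above: assuming the default
sysctl_sched_util_low_pct of 85 (the default value is an assumption
here) and four preferred cpus of capacity 1024 each, tg_capacity is
4096, so util_avg_sum * 100 <= 4096 * 85 keeps wakeups in the
preferred set while their summed utilization is at most 3481, i.e.
roughly 85% of the set's total capacity.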
@@ -7160,8 +7165,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 	rcu_read_unlock();
 
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
-	if (!cpumask_test_cpu(new_cpu, p->select_cpus))
+	if (!cpumask_test_cpu(new_cpu, p->select_cpus)) {
 		new_cpu = idlest_cpu;
+		schedstat_inc(p->se.statistics.nr_wakeups_force_preferred_cpus);
+	}
 #endif
 
 	schedstat_end_time(cpu_rq(cpu), time);
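A note on overhead, with schedstat_inc() sketched as it appears in
kernel/sched/stats.h of this kernel series (quoted from memory, so
treat the exact form as approximate):

	/*
	 * Even with CONFIG_SCHEDSTATS built in, the increment is guarded
	 * by a static key, so the new counters only tick once schedstats
	 * is turned on (e.g. sysctl kernel.sched_schedstats=1 or the
	 * schedstats=enable boot option):
	 */
	#define schedstat_inc(var)	do { if (schedstat_enabled()) { var++; } } while (0)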