From: Hui Tang <tanghui20@huawei.com>
Subject: [PATCH] sched: Add statistics for scheduler dynamic affinity
hulk inclusion
category: feature
bugzilla: 187173, https://gitee.com/openeuler/kernel/issues/I5G4IH
CVE: NA
--------------------------------
Add two wakeup statistics for dynamic affinity and show them in
/proc/<pid>/sched: nr_wakeups_preferred_cpus counts wakeups where
task placement is narrowed to the task's preferred cpus, and
nr_wakeups_force_preferred_cpus counts wakeups where the selected
cpu is overridden and the task falls back to the idlest cpu in its
preferred set.

Signed-off-by: Hui Tang <tanghui20@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 include/linux/sched.h | 17 +++++++++++++++++
 kernel/sched/core.c   |  8 ++++++++
 kernel/sched/debug.c  |  7 +++++++
 kernel/sched/fair.c   |  8 +++++++-
 4 files changed, 39 insertions(+), 1 deletion(-)
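For reference, the new counters only move when schedstats are enabled at
run time (sysctl kernel.sched_schedstats=1): schedstat_inc() compiles
away without CONFIG_SCHEDSTATS and is gated by a static branch with it.
A sketch of the mainline helpers from kernel/sched/stats.h (the exact
definitions may differ in this tree):

  /* Effective only when CONFIG_SCHEDSTATS=y; no-ops otherwise. */
  #define schedstat_enabled()	static_branch_unlikely(&sched_schedstats)
  #define schedstat_inc(var)	do { if (schedstat_enabled()) { var++; } } while (0)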
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8f27fa3e5622..928186f16100 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -444,6 +444,15 @@ struct sched_statistics {
 #endif
 };
 
+#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
+struct dyn_affinity_stats {
+#ifdef CONFIG_SCHEDSTATS
+	u64				nr_wakeups_preferred_cpus;
+	u64				nr_wakeups_force_preferred_cpus;
+#endif
+};
+#endif
+
 struct sched_entity {
 	/* For load-balancing: */
 	struct load_weight		load;
@@ -480,7 +489,15 @@ struct sched_entity {
 	struct sched_avg		avg;
 #endif
 
+#if !defined(__GENKSYMS__)
+#if defined(CONFIG_QOS_SCHED_DYNAMIC_AFFINITY)
+	struct dyn_affinity_stats	*dyn_affi_stats;
+#else
+	KABI_RESERVE(1)
+#endif
+#else
 	KABI_RESERVE(1)
+#endif
 	KABI_RESERVE(2)
 	KABI_RESERVE(3)
 	KABI_RESERVE(4)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 835f7c6c00ba..970616070da8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7203,12 +7203,20 @@ int sched_prefer_cpus_fork(struct task_struct *p, struct task_struct *orig)
 	else
 		cpumask_clear(p->prefer_cpus);
+	p->se.dyn_affi_stats = kzalloc(sizeof(struct dyn_affinity_stats),
+				       GFP_KERNEL);
+	if (!p->se.dyn_affi_stats) {
+		kfree(p->prefer_cpus);
+		p->prefer_cpus = NULL;
+		return -ENOMEM;
+	}
 
 	return 0;
 }
 
 void sched_prefer_cpus_free(struct task_struct *p)
 {
 	kfree(p->prefer_cpus);
+	kfree(p->se.dyn_affi_stats);
 }
 
 static void do_set_prefer_cpus(struct task_struct *p,
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index fcf2a07ece05..bcdfdaae3b73 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -925,6 +925,9 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 			  struct seq_file *m)
 {
+#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
+	struct dyn_affinity_stats *dyn_affi = p->se.dyn_affi_stats;
+#endif
 	unsigned long nr_switches;
 
 	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
@@ -983,6 +986,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
 		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
 		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
+#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
+		__P(dyn_affi->nr_wakeups_preferred_cpus);
+		__P(dyn_affi->nr_wakeups_force_preferred_cpus);
+#endif
 
 	avg_atom = p->se.sum_exec_runtime;
 	if (nr_switches)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7833ef8f32f4..bcc72537b6fa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6714,6 +6714,8 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
 		if (available_idle_cpu(cpu)) {
 			rcu_read_unlock();
 			p->select_cpus = p->prefer_cpus;
+			if (sd_flag & SD_BALANCE_WAKE)
+				schedstat_inc(p->se.dyn_affi_stats->nr_wakeups_preferred_cpus);
 			return;
 		}
@@ -6725,6 +6727,8 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
 	if (tg_capacity > cpumask_weight(p->prefer_cpus) &&
 	    util_avg_sum * 100 <= tg_capacity * sysctl_sched_util_low_pct) {
 		p->select_cpus = p->prefer_cpus;
+		if (sd_flag & SD_BALANCE_WAKE)
+			schedstat_inc(p->se.dyn_affi_stats->nr_wakeups_preferred_cpus);
 	}
 }
 #endif
@@ -6814,8 +6818,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 	rcu_read_unlock();
 
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
-	if (!cpumask_test_cpu(new_cpu, p->select_cpus))
+	if (!cpumask_test_cpu(new_cpu, p->select_cpus)) {
 		new_cpu = idlest_cpu;
+		schedstat_inc(p->se.dyn_affi_stats->nr_wakeups_force_preferred_cpus);
+	}
 #endif
 	schedstat_end_time(cpu_rq(cpu), time);
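
With the patch applied, the counters appear in /proc/<pid>/sched
(CONFIG_SCHED_DEBUG) under the names stringified by __P() above, i.e.
including the "dyn_affi->" prefix. A minimal user-space reader, as a
sketch only: the tool name affi_stats is made up here, and it assumes
CONFIG_QOS_SCHED_DYNAMIC_AFFINITY plus CONFIG_SCHEDSTATS, with
kernel.sched_schedstats=1 for non-zero values:

  #include <stdio.h>
  #include <string.h>

  /*
   * Dump the dynamic-affinity wakeup counters for a task, e.g.
   * "./affi_stats 1234", or "./affi_stats" for the current process.
   */
  int main(int argc, char **argv)
  {
  	char path[64], line[256];
  	FILE *fp;

  	snprintf(path, sizeof(path), "/proc/%s/sched",
  		 argc > 1 ? argv[1] : "self");
  	fp = fopen(path, "r");
  	if (!fp) {
  		perror("fopen");
  		return 1;
  	}
  	/* Field names match the __P() strings in proc_sched_show_task(). */
  	while (fgets(line, sizeof(line), fp))
  		if (strstr(line, "nr_wakeups_preferred_cpus"))
  			fputs(line, stdout);
  	fclose(fp);
  	return 0;
  }

A single strstr() suffices because the force variant contains the
shorter counter name as a substring, so both lines are printed.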