From: Hui Tang <tanghui20@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7A718
--------------------------------
Add a static key to reduce overhead when dynamic affinity is not enabled.
This yields better performance in some cases, for example in lmbench.
Fixes: 243865da2684 ("cpuset: Introduce new interface for scheduler ...")
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Zhang Changzhong <zhangchangzhong@huawei.com>
---
 include/linux/sched.h  |  1 +
 kernel/cgroup/cpuset.c |  3 +++
 kernel/sched/fair.c    | 32 ++++++++++++++++++++++++++++++--
 3 files changed, 34 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h index 3e95733..8fd8c5b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2007,6 +2007,7 @@ int set_prefer_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask); int sched_prefer_cpus_fork(struct task_struct *p, struct task_struct *orig); void sched_prefer_cpus_free(struct task_struct *p); +void dynamic_affinity_enable(void); #endif
#ifdef CONFIG_QOS_SCHED_SMART_GRID diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index d7ec7f9..ae2b1ad 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -625,6 +625,9 @@ static int update_prefer_cpumask(struct cpuset *cs, struct cpuset *trialcs,
update_tasks_prefer_cpumask(trialcs);
+ if (!cpumask_empty(trialcs->prefer_cpus)) + dynamic_affinity_enable(); + spin_lock_irq(&callback_lock); cpumask_copy(cs->prefer_cpus, trialcs->prefer_cpus); spin_unlock_irq(&callback_lock); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1cd08e1..19ee03b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7131,6 +7131,29 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) }
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY + +#ifdef CONFIG_JUMP_LABEL +static DEFINE_STATIC_KEY_FALSE(__dynamic_affinity_used); + +static inline bool dynamic_affinity_used(void) +{ + return static_branch_unlikely(&__dynamic_affinity_used); +} + +void dynamic_affinity_enable(void) +{ + static_branch_enable_cpuslocked(&__dynamic_affinity_used); +} + +#else /* CONFIG_JUMP_LABEL */ +static bool dynamic_affinity_used(void) +{ + return true; +} + +void dynamic_affinity_enable(void) {} +#endif + /* * Low utilization threshold for CPU * @@ -7246,7 +7269,9 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f time = schedstat_start_time();
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY - set_task_select_cpus(p, &idlest_cpu, sd_flag); + p->select_cpus = &p->cpus_allowed; + if (dynamic_affinity_used()) + set_task_select_cpus(p, &idlest_cpu, sd_flag); #endif
if (sd_flag & SD_BALANCE_WAKE) { @@ -8272,7 +8297,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) return 0;
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY - set_task_select_cpus(p, NULL, 0); + p->select_cpus = &p->cpus_allowed; + if (dynamic_affinity_used()) + set_task_select_cpus(p, NULL, 0); + if (!cpumask_test_cpu(env->dst_cpu, p->select_cpus)) { #else if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {