hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8LL9S
--------------------------------
Add a 'dynamic_affinity' kernel command-line parameter to control the
dynamic affinity feature, which is disabled by default.
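The switch is registered with __setup(), so the feature is enabled by
booting with the parameter on the kernel command line. A minimal
sketch of how that might look (the grub file below is illustrative,
not part of this patch):

    # /etc/default/grub (then regenerate grub.cfg and reboot)
    GRUB_CMDLINE_LINUX="... dynamic_affinity"

Without the parameter, dynamic_affinity_enabled() stays false and the
prefer_cpus interfaces below fail with -EPERM.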
Signed-off-by: Hui Tang <tanghui20@huawei.com>
---
 include/linux/sched.h  |  6 ++++++
 kernel/cgroup/cpuset.c |  3 +++
 kernel/fork.c          | 11 +++++++----
 kernel/sched/core.c    |  3 +++
 kernel/sched/debug.c   |  6 ++++--
 kernel/sched/fair.c    | 13 +++++++++++++
 6 files changed, 36 insertions(+), 6 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 479ee3cece5d..fe8556ff7fb3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2484,5 +2484,11 @@ int set_prefer_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask);
 int sched_prefer_cpus_fork(struct task_struct *p, struct cpumask *mask);
 void sched_prefer_cpus_free(struct task_struct *p);
+
+extern struct static_key_false __dynamic_affinity_switch;
+static inline bool dynamic_affinity_enabled(void)
+{
+	return static_branch_unlikely(&__dynamic_affinity_switch);
+}
 #endif
 
 #endif
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 01f4ff02e7b2..cfdca8aeabda 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -858,6 +858,9 @@ static int update_prefer_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	if (cs == &top_cpuset)
 		return -EACCES;
 
+	if (!dynamic_affinity_enabled())
+		return -EPERM;
+
 	/*
 	 * An empty prefer_cpus is ok which mean that the cpuset tasks disable
 	 * dynamic affinity feature.
diff --git a/kernel/fork.c b/kernel/fork.c
index 38a2d6d026b2..8483e5e7d920 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -626,7 +626,8 @@ void free_task(struct task_struct *tsk)
 	free_kthread_struct(tsk);
 	bpf_task_storage_free(tsk);
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
-	sched_prefer_cpus_free(tsk);
+	if (dynamic_affinity_enabled())
+		sched_prefer_cpus_free(tsk);
 #endif
 	free_task_struct(tsk);
 }
@@ -2365,9 +2366,11 @@ __latent_entropy struct task_struct *copy_process(
 	rt_mutex_init_task(p);
 
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
-	retval = sched_prefer_cpus_fork(p, current->prefer_cpus);
-	if (retval)
-		goto bad_fork_free;
+	if (dynamic_affinity_enabled()) {
+		retval = sched_prefer_cpus_fork(p, current->prefer_cpus);
+		if (retval)
+			goto bad_fork_free;
+	}
 #endif
 
 	lockdep_assert_irqs_enabled();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a1cebed8dae8..58c274b655ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -11635,6 +11635,9 @@ static int __set_prefer_cpus_ptr(struct task_struct *p,
 	struct rq *rq;
 	int ret = 0;
 
+	if (!dynamic_affinity_enabled())
+		return -EPERM;
+
 	if (unlikely(!p->prefer_cpus))
 		return -EINVAL;
 
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 1fe9aefc7baf..eee2d05dc90a 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -1040,8 +1040,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 		P_SCHEDSTAT(nr_wakeups_passive);
 		P_SCHEDSTAT(nr_wakeups_idle);
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
-		P_SCHEDSTAT(nr_wakeups_preferred_cpus);
-		P_SCHEDSTAT(nr_wakeups_force_preferred_cpus);
+		if (dynamic_affinity_enabled()) {
+			P_SCHEDSTAT(nr_wakeups_preferred_cpus);
+			P_SCHEDSTAT(nr_wakeups_force_preferred_cpus);
+		}
 #endif
 
 		avg_atom = p->se.sum_exec_runtime;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 45b93abc933d..0a554d13adbf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8019,8 +8019,21 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 }
 
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
+
+DEFINE_STATIC_KEY_FALSE(__dynamic_affinity_switch);
+
+static int __init dynamic_affinity_switch_setup(char *__unused)
+{
+	static_branch_enable(&__dynamic_affinity_switch);
+	return 1;
+}
+__setup("dynamic_affinity", dynamic_affinity_switch_setup);
+
 static inline bool prefer_cpus_valid(struct task_struct *p)
 {
+	if (!dynamic_affinity_enabled())
+		return false;
+
 	return p->prefer_cpus &&
 	       !cpumask_empty(p->prefer_cpus) &&
 	       !cpumask_equal(p->prefer_cpus, p->cpus_ptr) &&
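For completeness, the behaviour expected from userspace (a sketch;
the cpuset file name cpuset.prefer_cpus and the cgroup mount point
are assumptions based on update_prefer_cpumask() above):

    # booted WITHOUT "dynamic_affinity": the write is rejected
    $ echo 0-3 > /sys/fs/cgroup/cpuset/demo/cpuset.prefer_cpus
    echo: write error: Operation not permitted

    # booted WITH "dynamic_affinity": the prefer_cpus mask is accepted
    $ echo 0-3 > /sys/fs/cgroup/cpuset/demo/cpuset.prefer_cpus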