hulk inclusion category: feature bugzilla: https://atomgit.com/openeuler/kernel/issues/8929 ---------------------------------------- Move prefer_cpus_valid(), task_prefer_cpus() and sched_paral_used() from kernel/sched/fair.c to kernel/sched/sched.h, with sched_paral_used() becoming a static inline helper. This exposes these dynamic affinity feature helpers to other scheduler source files, allowing them to be invoked outside of fair.c. No functional change intended. Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com> --- kernel/sched/fair.c | 40 ---------------------------------------- kernel/sched/sched.h | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 40 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 007d42c553e5..807faef39ddc 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6955,7 +6955,6 @@ static DEFINE_MUTEX(smart_grid_used_mutex); static unsigned long capacity_of(int cpu); static int sched_idle_cpu(int cpu); static unsigned long cpu_runnable(struct rq *rq); -static inline bool prefer_cpus_valid(struct task_struct *p); struct static_key __smart_grid_used; @@ -6969,18 +6968,6 @@ static void smart_grid_usage_dec(void) static_key_slow_dec(&__smart_grid_used); } -static inline struct cpumask *task_prefer_cpus(struct task_struct *p) -{ - if (!smart_grid_used() || - !task_group(p)->auto_affinity) - return p->prefer_cpus; - - if (task_group(p)->auto_affinity->mode == 0) - return (void *)p->cpus_ptr; - - return sched_grid_prefer_cpus(p); -} - static inline int dynamic_affinity_mode(struct task_struct *p) { if (smart_grid_used()) @@ -7444,13 +7431,6 @@ int tg_rebuild_affinity_domains(int cpu, struct auto_affinity *auto_affi) static void __maybe_unused destroy_auto_affinity(struct task_group *tg) {} #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY -static inline bool prefer_cpus_valid(struct task_struct *p); - -static inline struct cpumask *task_prefer_cpus(struct task_struct *p) -{ - return p->prefer_cpus; -} - static inline int dynamic_affinity_mode(struct task_struct *p) { return 0; } @@ -9156,13 +9136,6 @@ 
static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) } #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY -#ifdef CONFIG_SCHED_PARAL -bool sched_paral_used(void) -{ - return sched_feat(PARAL); -} -#endif - DEFINE_STATIC_KEY_FALSE(__dynamic_affinity_switch); static int __init dynamic_affinity_switch_setup(char *str) @@ -9187,19 +9160,6 @@ static int __init dynamic_affinity_switch_setup(char *str) } __setup("dynamic_affinity=", dynamic_affinity_switch_setup); -static inline bool prefer_cpus_valid(struct task_struct *p) -{ - struct cpumask *prefer_cpus = task_prefer_cpus(p); - - if (dynamic_affinity_enabled() || sched_paral_used()) { - return !cpumask_empty(prefer_cpus) && - !cpumask_equal(prefer_cpus, p->cpus_ptr) && - cpumask_subset(prefer_cpus, p->cpus_ptr); - } - - return false; -} - static inline unsigned long taskgroup_cpu_util(struct task_group *tg, int cpu) { diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 5d68f3a66d28..d6bb2cc0b703 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -11,6 +11,7 @@ #include <linux/sched/cpufreq.h> #include <linux/sched/deadline.h> #include <linux/sched.h> +#include <linux/sched/grid_qos.h> #include <linux/sched/loadavg.h> #include <linux/sched/mm.h> #include <linux/sched/rseq_api.h> @@ -3788,4 +3789,45 @@ static inline int destroy_soft_domain(struct task_group *tg) #endif +#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY +#ifdef CONFIG_QOS_SCHED_SMART_GRID +static inline struct cpumask *task_prefer_cpus(struct task_struct *p) +{ + if (!smart_grid_used() || + !task_group(p)->auto_affinity) + return p->prefer_cpus; + + if (task_group(p)->auto_affinity->mode == 0) + return (void *)p->cpus_ptr; + + return sched_grid_prefer_cpus(p); +} +#else +static inline struct cpumask *task_prefer_cpus(struct task_struct *p) +{ + return p->prefer_cpus; +} +#endif /* CONFIG_QOS_SCHED_SMART_GRID */ + +#ifdef CONFIG_SCHED_PARAL +static inline bool sched_paral_used(void) +{ + return sched_feat(PARAL); +} +#endif + 
+static inline bool prefer_cpus_valid(struct task_struct *p) +{ + struct cpumask *prefer_cpus = task_prefer_cpus(p); + + if (dynamic_affinity_enabled() || sched_paral_used()) { + return !cpumask_empty(prefer_cpus) && + !cpumask_equal(prefer_cpus, p->cpus_ptr) && + cpumask_subset(prefer_cpus, p->cpus_ptr); + } + + return false; +} +#endif /* CONFIG_QOS_SCHED_DYNAMIC_AFFINITY */ + #endif /* _KERNEL_SCHED_SCHED_H */ -- 2.34.1