hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I7A718
--------------------------------
Performance is worse with the 'Fixes' commit applied when running "./lat_ctx -P $SYNC_MAX -s 64 16".
The 'Fixes' commit allocates memory for p->prefer_cpus even if "prefer_cpus" is not set.
Before the 'Fixes' commit, only "p->prefer_cpus" was tested; after it, the additional "!cpumask_empty(p->prefer_cpus)" test is performed on every wakeup, which causes the performance degradation.
select_task_rq_fair ->set_task_select_cpus ->prefer_cpus_valid ---- test cpumask_empty(p->prefer_cpus)
Fixes: ebeb84ad373d ("cpuset: Introduce new interface for scheduler ...") Signed-off-by: Hui Tang tanghui20@huawei.com --- fs/proc/base.c | 6 ++++++ include/linux/sched.h | 1 + kernel/cgroup/cpuset.c | 3 +++ kernel/sched/fair.c | 24 ++++++++++++++++++++++++ 4 files changed, 34 insertions(+)
diff --git a/fs/proc/base.c b/fs/proc/base.c index 5836b4b25052..22c65289128e 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -3298,6 +3298,12 @@ static ssize_t preferred_cpuset_write(struct file *file, const char __user *buf, if (retval < 0) goto out_free_cpumask;
+ if (!cpumask_empty(new_mask)) { + cpus_read_lock(); + dynamic_affinity_enable(); + cpus_read_unlock(); + } + retval = count;
out_free_cpumask: diff --git a/include/linux/sched.h b/include/linux/sched.h index 3b8f72bf0c6b..3aae225f98a7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2227,6 +2227,7 @@ int set_prefer_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask); int sched_prefer_cpus_fork(struct task_struct *p, struct cpumask *mask); void sched_prefer_cpus_free(struct task_struct *p); +void dynamic_affinity_enable(void); #endif
#ifdef CONFIG_BPF_SCHED diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 2529a807599b..90de01cc6827 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -734,6 +734,9 @@ static int update_prefer_cpumask(struct cpuset *cs, struct cpuset *trialcs,
update_tasks_prefer_cpumask(trialcs);
+ if (!cpumask_empty(trialcs->prefer_cpus)) + dynamic_affinity_enable(); + spin_lock_irq(&callback_lock); cpumask_copy(cs->prefer_cpus, trialcs->prefer_cpus); spin_unlock_irq(&callback_lock); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c7b560ffb75e..ff209d25c21c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7067,6 +7067,27 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) }
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY + +#ifdef CONFIG_JUMP_LABEL +static DEFINE_STATIC_KEY_FALSE(__dynamic_affinity_used); + +static inline bool dynamic_affinity_used(void) +{ + return static_branch_unlikely(&__dynamic_affinity_used); +} + +void dynamic_affinity_enable(void) +{ + static_branch_enable_cpuslocked(&__dynamic_affinity_used); +} + +#else /* CONFIG_JUMP_LABEL */ +static bool dynamic_affinity_used(void) +{ + return true; +} +#endif + /* * Low utilization threshold for CPU * @@ -7076,6 +7097,9 @@ int sysctl_sched_util_low_pct = 85;
static inline bool prefer_cpus_valid(struct task_struct *p) { + if (!dynamic_affinity_used()) + return false; + return p->prefer_cpus && !cpumask_empty(p->prefer_cpus) && !cpumask_equal(p->prefer_cpus, p->cpus_ptr) &&