From: Yang Yingliang <yangyingliang@huawei.com>
mainline inclusion
from mainline-v6.11-rc1
commit 31b164e2e4af84d08d2498083676e7eeaa102493
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAOXYI
CVE: CVE-2024-44958
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Introduce the sched_smt_present_inc()/dec() helpers, so they can be called simply from both the normal and error paths. No functional change.
Cc: stable@kernel.org
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240703031610.587047-2-yangyingliang@huaweicloud....
Conflicts:
	kernel/sched/core.c
[Some context around sched_cpu_deactivate() differs. No functional impact.]
Signed-off-by: Zheng Zucheng <zhengzucheng@huawei.com>
---
 kernel/sched/core.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a87b7b3a6c12..4e478cc7612c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5829,18 +5829,31 @@ static int cpuset_cpu_inactive(unsigned int cpu)
 	return 0;
 }
 
+static inline void sched_smt_present_inc(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+}
+
+static inline void sched_smt_present_dec(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+}
+
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct rq_flags rf;
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going up, increment the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
+	sched_smt_present_inc(cpu);
 	set_cpu_active(cpu, true);
 
 	tg_update_affinity_domains(cpu, 1);
@@ -5884,13 +5897,10 @@ int sched_cpu_deactivate(unsigned int cpu)
 	 */
 	synchronize_rcu_mult(call_rcu, call_rcu_sched);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going down, decrement the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_dec_cpuslocked(&sched_smt_present);
-#endif
+	sched_smt_present_dec(cpu);
 
 	if (!sched_smp_initialized)
 		return 0;
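
For illustration: the point of factoring these helpers out is that the follow-up CVE-2024-44958 fix can rebalance the static key when CPU deactivation fails partway through. Below is a minimal sketch (not part of this diff) of that error-path usage, assuming cpuset_cpu_inactive() is the failing step as in mainline; the exact surrounding code in this tree may differ:

	int sched_cpu_deactivate(unsigned int cpu)
	{
		int ret;

		/* ... earlier deactivation steps elided ... */

		/* When going down, decrement the number of cores with SMT present. */
		sched_smt_present_dec(cpu);

		/* ... */

		ret = cpuset_cpu_inactive(cpu);
		if (ret) {
			/*
			 * Roll back on failure: re-increment so the
			 * sched_smt_present static key stays balanced.
			 */
			sched_smt_present_inc(cpu);
			set_cpu_active(cpu, true);
			return ret;
		}

		/* ... */
		return 0;
	}

Without the helpers, the rollback would have to duplicate the #ifdef CONFIG_SCHED_SMT block and the cpumask_weight() check at every failure site.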