From: Hui Tang <tanghui20@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7DA63
CVE: NA
--------------------------------
Add a mutex to serialize start_auto_affinity()/stop_auto_affinity(), and make destroy_auto_affinity() drop its reference only when period_active is set, to prevent a negative count for the smart grid jump label.
[28612.530675] ------------[ cut here ]------------
[28612.532708] jump label: negative count!
[28612.535031] WARNING: CPU: 4 PID: 3899 at kernel/jump_label.c:202 __static_key_slow_dec_cpuslocked+0x204/0x240
[28612.538216] Kernel panic - not syncing: panic_on_warn set ...
[28612.538216]
[28612.540487] CPU: 4 PID: 3899 Comm: sh Kdump: loaded Not tainted
[28612.542788] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
[28612.546455] Call Trace:
[28612.547339]  dump_stack+0xc6/0x11e
[28612.548546]  ? __static_key_slow_dec_cpuslocked+0x200/0x240
[28612.550352]  panic+0x1d6/0x46b
[28612.551375]  ? refcount_error_report+0x2a5/0x2a5
[28612.552915]  ? kmsg_dump_rewind_nolock+0xde/0xde
[28612.554358]  ? sched_clock_cpu+0x18/0x1b0
[28612.555699]  ? __warn+0x1d1/0x210
[28612.556799]  ? __static_key_slow_dec_cpuslocked+0x204/0x240
[28612.558548]  __warn+0x1ec/0x210
[28612.559621]  ? __static_key_slow_dec_cpuslocked+0x204/0x240
[28612.561536]  report_bug+0x1ee/0x2b0
[28612.562706]  fixup_bug.part.4+0x37/0x80
[28612.563937]  do_error_trap+0x21c/0x260
[28612.565109]  ? fixup_bug.part.4+0x80/0x80
[28612.566453]  ? check_preemption_disabled+0x34/0x1f0
[28612.567991]  ? trace_hardirqs_off_thunk+0x1a/0x1c
[28612.569534]  ? lockdep_hardirqs_off+0x1cb/0x2b0
[28612.570993]  ? error_entry+0x9a/0x130
[28612.572138]  ? trace_hardirqs_off_caller+0x59/0x1a0
[28612.573710]  ? trace_hardirqs_off_thunk+0x1a/0x1c
[28612.575232]  invalid_op+0x14/0x20
[28612.576387]  ? vprintk_func+0x68/0x1a0
[28612.577827]  ? __static_key_slow_dec_cpuslocked+0x204/0x240
[28612.579662]  ? __static_key_slow_dec_cpuslocked+0x204/0x240
[28612.581781]  ? static_key_disable+0x30/0x30
[28612.583248]  ? static_key_slow_dec+0x57/0x90
[28612.584997]  ? tg_set_dynamic_affinity_mode+0x42/0x70
[28612.586714]  ? cgroup_file_write+0x471/0x6a0
[28612.588162]  ? cgroup_css.part.4+0x100/0x100
[28612.589579]  ? cgroup_css.part.4+0x100/0x100
[28612.591031]  ? kernfs_fop_write+0x2af/0x430
[28612.592625]  ? kernfs_vma_page_mkwrite+0x230/0x230
[28612.594274]  ? __vfs_write+0xef/0x680
[28612.595590]  ? kernel_read+0x110/0x110
[28612.596899]  ? check_preemption_disabled+0x34/0x1f0
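Why serializing with a mutex helps, in outline: the static-key increment/decrement happens after auto_affi->lock has been dropped, so a stop path racing with a start path (or with task-group destruction, see the destroy_auto_affinity() hunk) can issue its smart_grid_usage_dec() before the matching smart_grid_usage_inc(), or drop a reference that was never taken, and the jump-label count goes negative. The sketch below is illustrative only, not a verbatim copy of fair.c: smart_grid_used_mutex, the period_active check and smart_grid_usage_dec() come from the diff, the rest of the body is a placeholder.

/* Illustrative sketch of the locking order the patch introduces. */
static DEFINE_MUTEX(smart_grid_used_mutex);

void stop_auto_affinity(struct auto_affinity *auto_affi)
{
	/* Serialize against concurrent start/stop/destroy callers. */
	mutex_lock(&smart_grid_used_mutex);

	raw_spin_lock_irq(&auto_affi->lock);
	if (auto_affi->period_active == 0) {
		/* Nothing was started: do not touch the static key. */
		raw_spin_unlock_irq(&auto_affi->lock);
		mutex_unlock(&smart_grid_used_mutex);
		return;
	}
	/* ... remainder of the original stop path runs here ... */
	raw_spin_unlock_irq(&auto_affi->lock);

	/*
	 * The static-key decrement still runs outside auto_affi->lock,
	 * but the mutex guarantees it cannot overtake the matching
	 * smart_grid_usage_inc() of a racing start_auto_affinity().
	 */
	smart_grid_usage_dec();
	mutex_unlock(&smart_grid_used_mutex);
}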
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Zhang Changzhong <zhangchangzhong@huawei.com>
---
 kernel/sched/fair.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d2acee4..3041652 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5286,6 +5286,8 @@ static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 #define AUTO_AFFINITY_DEFAULT_PERIOD_MS	2000
 #define IS_DOMAIN_SET(level, mask)	((1 << (level)) & (mask))
 
+static DEFINE_MUTEX(smart_grid_used_mutex);
+
 static inline unsigned long cpu_util(int cpu);
 static unsigned long target_load(int cpu, int type);
 static unsigned long capacity_of(int cpu);
@@ -5454,9 +5456,11 @@ void start_auto_affinity(struct auto_affinity *auto_affi)
 {
 	ktime_t delay_ms;
 
+	mutex_lock(&smart_grid_used_mutex);
 	raw_spin_lock_irq(&auto_affi->lock);
 	if (auto_affi->period_active == 1) {
 		raw_spin_unlock_irq(&auto_affi->lock);
+		mutex_unlock(&smart_grid_used_mutex);
 		return;
 	}
 
@@ -5469,15 +5473,18 @@ void start_auto_affinity(struct auto_affinity *auto_affi)
 	raw_spin_unlock_irq(&auto_affi->lock);
 
 	smart_grid_usage_inc();
+	mutex_unlock(&smart_grid_used_mutex);
 }
 
 void stop_auto_affinity(struct auto_affinity *auto_affi)
 {
 	struct affinity_domain *ad = &auto_affi->ad;
 
+	mutex_lock(&smart_grid_used_mutex);
 	raw_spin_lock_irq(&auto_affi->lock);
 	if (auto_affi->period_active == 0) {
 		raw_spin_unlock_irq(&auto_affi->lock);
+		mutex_unlock(&smart_grid_used_mutex);
 		return;
 	}
 
@@ -5488,6 +5495,7 @@ void stop_auto_affinity(struct auto_affinity *auto_affi)
 	raw_spin_unlock_irq(&auto_affi->lock);
 
 	smart_grid_usage_dec();
+	mutex_unlock(&smart_grid_used_mutex);
 }
 
 static struct sched_group *sd_find_idlest_group(struct sched_domain *sd)
@@ -5693,7 +5701,7 @@ static void destroy_auto_affinity(struct task_group *tg)
 {
 	struct auto_affinity *auto_affi = tg->auto_affinity;
 
-	if (auto_affi->mode)
+	if (auto_affi->period_active)
 		smart_grid_usage_dec();
 
 	hrtimer_cancel(&auto_affi->period_timer);