hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IAS45L
-----------------------------------------
This reverts commit 995f0e60b7ef251174dfb2a5c89391a9f230403a.
The mutex introduced by that commit is taken in tg_change_scheduler(), which runs inside an RCU read-side critical section. An RCU read-side critical section is an atomic context, and acquiring a sleepable mutex there is not allowed, so revert the commit.
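To make the problem concrete, here is a minimal sketch of the invalid pattern (illustration only; the function and lock names are hypothetical and not taken from the reverted code):

/*
 * Illustration only: rcu_read_lock() begins a non-sleepable critical
 * section, while mutex_lock() may sleep, so this pairing is invalid.
 */
#include <linux/rcupdate.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);		/* hypothetical lock */

static void broken_pattern(void)
{
	rcu_read_lock();			/* enter RCU read-side critical section */
	mutex_lock(&example_mutex);		/* BUG: may sleep in atomic context */
	mutex_unlock(&example_mutex);
	rcu_read_unlock();
}

On kernels without preemptible RCU, rcu_read_lock() disables preemption, so CONFIG_DEBUG_ATOMIC_SLEEP would flag the mutex_lock() above via its might_sleep() check.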
Fixes: 995f0e60b7ef ("sched: add mutex lock to protect qos_level")
Signed-off-by: Cheng Yu <serein.chengyu@huawei.com>
Signed-off-by: Zucheng Zheng <zhengzucheng@huawei.com>
---
 kernel/sched/core.c  | 9 ---------
 kernel/sched/fair.c  | 3 ---
 kernel/sched/sched.h | 4 ----
 3 files changed, 16 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 72abf7459829..7595a3fef28f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8642,13 +8642,6 @@ static inline int alloc_qos_sched_group(struct task_group *tg,
 #ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
 	tg->smt_expell = parent->smt_expell;
 #endif
-	tg->qos_level_mutex = kzalloc(sizeof(struct mutex), GFP_KERNEL);
-
-	if (!tg->qos_level_mutex)
-		return 0;
-
-	mutex_init(tg->qos_level_mutex);
-
 	return 1;
 }
 
@@ -9724,7 +9717,6 @@ static int tg_change_scheduler(struct task_group *tg, void *data)
 	s64 qos_level = *(s64 *)data;
 	struct cgroup_subsys_state *css = &tg->css;
 
-	mutex_lock(tg->qos_level_mutex);
 	tg->qos_level = qos_level;
 	if (is_offline_level(qos_level))
 		policy = SCHED_IDLE;
@@ -9742,7 +9734,6 @@ static int tg_change_scheduler(struct task_group *tg, void *data)
 		sched_setscheduler(tsk, policy, &param);
 	}
 	css_task_iter_end(&it);
-	mutex_unlock(tg->qos_level_mutex);
 
 	return 0;
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d742793567b2..45b8e13943db 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -13645,9 +13645,6 @@ void free_fair_sched_group(struct task_group *tg)
 		kfree(tg->se[i]);
 	}
 
-#ifdef CONFIG_QOS_SCHED
-	kfree(tg->qos_level_mutex);
-#endif
 	kfree(tg->cfs_rq);
 	kfree(tg->se);
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dbd264f595a0..a6d7febf789d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -497,11 +497,7 @@ struct task_group {
 #else
 	KABI_RESERVE(2)
 #endif
-#ifdef CONFIG_QOS_SCHED
-	KABI_USE(3, struct mutex *qos_level_mutex)
-#else
 	KABI_RESERVE(3)
-#endif
 #if defined(CONFIG_QOS_SCHED_SMART_GRID) && !defined(__GENKSYMS__)
 	KABI_USE(4, struct auto_affinity *auto_affinity)
 #else