From: Ingo Molnar <mingo@kernel.org>
mainline inclusion
from mainline-v6.1-rc1
commit 09348d75a6ce60eec85c86dd0ab7babc4db3caf6
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9QW75
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
There's no good reason to crash a user's system with a BUG_ON(): chances are high that they'll never even see the crash message on Xorg, and it won't make it into the syslog either.
By using a WARN_ON_ONCE() we at least give the user a chance to report any bugs triggered here - instead of getting silent hangs.
None of these WARN_ON_ONCE()s are supposed to trigger, ever - so we ignore cases where a NULL check is done via a BUG_ON() and we let a NULL pointer through after a WARN_ON_ONCE().
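For instance, in the pick_next_task_dl() hunk below the NULL check simply becomes non-fatal and dl_se is still dereferenced on the next line, which is acceptable precisely because the condition is never expected to be true:

 	dl_se = pick_next_dl_entity(rq, dl_rq);
 -	BUG_ON(!dl_se);
 +	WARN_ON_ONCE(!dl_se);

 	p = dl_task_of(dl_se);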
There's one exception: WARN_ON_ONCE() arguments with side-effects, such as locking - in this case we use the return value of the WARN_ON_ONCE(), such as in:
 -	BUG_ON(!lock_task_sighand(p, &flags));
 +	if (WARN_ON_ONCE(!lock_task_sighand(p, &flags)))
 +		return;
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/YvSsKcAXISmshtHo@gmail.com
Conflicts:
	kernel/sched/deadline.c
	kernel/sched/fair.c
	kernel/sched/core.c
	kernel/sched/sched.h
[Some contexts around BUG_ON are different. No functional impact.]
Signed-off-by: Zhao Wenhui <zhaowenhui8@huawei.com>
---
 kernel/sched/autogroup.c |  3 ++-
 kernel/sched/core.c      |  2 +-
 kernel/sched/cpupri.c    |  2 +-
 kernel/sched/deadline.c  | 26 +++++++++++++-------------
 kernel/sched/fair.c      | 10 +++++-----
 kernel/sched/rt.c        |  2 +-
 kernel/sched/sched.h     |  6 +++---
 7 files changed, 26 insertions(+), 25 deletions(-)
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index 640d4019deac..067f2e6d8546 100644
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -139,7 +139,8 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 	struct task_struct *t;
 	unsigned long flags;
 
-	BUG_ON(!lock_task_sighand(p, &flags));
+	if (WARN_ON_ONCE(!lock_task_sighand(p, &flags)))
+		return;
 
 	prev = p->signal->autogroup;
 	if (prev == ag) {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7825ceaae0c4..fe9f91f39e2f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -948,7 +948,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
 	rq = cpu_rq(new_cpu);
 
 	rq_lock(rq, rf);
-	BUG_ON(task_cpu(p) != new_cpu);
+	WARN_ON_ONCE(task_cpu(p) != new_cpu);
 	enqueue_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	check_preempt_curr(rq, p, 0);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index daaadf939ccb..324af8dcadc0 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -66,7 +66,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 	int idx = 0;
 	int task_pri = convert_prio(p->prio);
 
-	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
+	WARN_ON_ONCE(task_pri >= CPUPRI_NR_PRIORITIES);
 
 	for (idx = 0; idx < task_pri; idx++) {
 		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 6c4f93af15db..43ecd27b37c8 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -157,7 +157,7 @@ void dl_change_utilization(struct task_struct *p, u64 new_bw)
 {
 	struct rq *rq;
 
-	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
+	WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
 
 	if (task_on_rq_queued(p))
 		return;
@@ -450,7 +450,7 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 	struct task_struct *entry;
 	bool leftmost = true;
 
-	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
+	WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 
 	while (*link) {
 		parent = *link;
@@ -545,7 +545,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 			 * Failed to find any suitable CPU.
 			 * The task will never come back!
 			 */
-			BUG_ON(dl_bandwidth_enabled());
+			WARN_ON_ONCE(dl_bandwidth_enabled());
 
 			/*
 			 * If admission control is disabled we
@@ -701,7 +701,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 	struct rq *rq = rq_of_dl_rq(dl_rq);
 
-	BUG_ON(pi_se->dl_runtime <= 0);
+	WARN_ON_ONCE(pi_se->dl_runtime <= 0);
 
 	/*
 	 * This could be the case for a !-dl task that is boosted.
@@ -1407,7 +1407,7 @@ static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
 	struct sched_dl_entity *entry;
 	int leftmost = 1;
 
-	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
+	WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
 
 	while (*link) {
 		parent = *link;
@@ -1443,7 +1443,7 @@ static void
 enqueue_dl_entity(struct sched_dl_entity *dl_se,
 		  struct sched_dl_entity *pi_se, int flags)
 {
-	BUG_ON(on_dl_rq(dl_se));
+	WARN_ON_ONCE(on_dl_rq(dl_se));
 
 	/*
 	 * If this is a wakeup or a new instance, the scheduling
@@ -1780,7 +1780,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	put_prev_task(rq, prev);
 
 	dl_se = pick_next_dl_entity(rq, dl_rq);
-	BUG_ON(!dl_se);
+	WARN_ON_ONCE(!dl_se);
 
 	p = dl_task_of(dl_se);
 	p->se.exec_start = rq_clock_task(rq);
@@ -1987,12 +1987,12 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
 		     struct task_struct, pushable_dl_tasks);
 
-	BUG_ON(rq->cpu != task_cpu(p));
-	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	WARN_ON_ONCE(rq->cpu != task_cpu(p));
+	WARN_ON_ONCE(task_current(rq, p));
+	WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
 
-	BUG_ON(!task_on_rq_queued(p));
-	BUG_ON(!dl_task(p));
+	WARN_ON_ONCE(!task_on_rq_queued(p));
+	WARN_ON_ONCE(!dl_task(p));
 
 	return p;
 }
@@ -2266,7 +2266,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 	struct root_domain *src_rd;
 	struct rq *rq;
 
-	BUG_ON(!dl_task(p));
+	WARN_ON_ONCE(!dl_task(p));
 
 	rq = task_rq(p);
 	src_rd = rq->rd;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6d0ec315f7be..3bd5aa6dedb3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2411,7 +2411,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	if (!join)
 		return;
 
-	BUG_ON(irqs_disabled());
+	WARN_ON_ONCE(irqs_disabled());
 	double_lock_irq(&my_grp->lock, &grp->lock);
 
 	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
@@ -7595,7 +7595,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 	find_matching_se(&se, &pse);
 	update_curr(cfs_rq_of(se));
-	BUG_ON(!pse);
+	WARN_ON_ONCE(!pse);
 	if (wakeup_preempt_entity(se, pse) == 1) {
 		/*
 		 * Bias pick_next to pick the sched entity that is
@@ -8608,7 +8608,7 @@ static void attach_task(struct rq *rq, struct task_struct *p)
 {
 	lockdep_assert_held(&rq->lock);
 
-	BUG_ON(task_rq(p) != rq);
+	WARN_ON_ONCE(task_rq(p) != rq);
 	activate_task(rq, p, ENQUEUE_NOCLOCK);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	check_preempt_curr(rq, p, 0);
@@ -9916,7 +9916,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		goto out_balanced;
 	}
 
-	BUG_ON(busiest == env.dst_rq);
+	WARN_ON_ONCE(busiest == env.dst_rq);
 
 	schedstat_add(sd->lb_imbalance[idle], env.imbalance);
 
@@ -10212,7 +10212,7 @@ static int active_load_balance_cpu_stop(void *data)
 	 * we need to fix it. Originally reported by
 	 * Bjorn Helgaas on a 128-CPU setup.
 	 */
-	BUG_ON(busiest_rq == target_rq);
+	WARN_ON_ONCE(busiest_rq == target_rq);
 
 	/* Search for an sd spanning us and the target CPU. */
 	rcu_read_lock();
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ad893ec818cd..58364f489529 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -758,7 +758,7 @@ static void __disable_runtime(struct rq *rq)
 		 * We cannot be left wanting - that would mean some runtime
 		 * leaked out of the system.
 		 */
-		BUG_ON(want);
+		WARN_ON_ONCE(want);
 balanced:
 		/*
 		 * Disable all the borrow logic by pretending we have inf
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1d882a2b8d5f..4dd0e4de0aab 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2146,8 +2146,8 @@ static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	__acquires(rq1->lock)
 	__acquires(rq2->lock)
 {
-	BUG_ON(!irqs_disabled());
-	BUG_ON(rq1 != rq2);
+	WARN_ON_ONCE(!irqs_disabled());
+	WARN_ON_ONCE(rq1 != rq2);
 	raw_spin_lock(&rq1->lock);
 	__acquire(rq2->lock);	/* Fake it out ;) */
 }
@@ -2162,7 +2162,7 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__releases(rq1->lock)
 	__releases(rq2->lock)
 {
-	BUG_ON(rq1 != rq2);
+	WARN_ON_ONCE(rq1 != rq2);
 	raw_spin_unlock(&rq1->lock);
 	__release(rq2->lock);
 }
FeedBack: The patch(es) you sent to the kernel@openeuler.org mailing list has been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/7676
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/T...