Backport of the fix for CVE-2024-26686 (bugzilla 189780). Reading /proc/<pid>/stat of a process with a huge number of threads from many CPUs at once can make lock_task_sighand() spin with irqs disabled for O(NR_CPUS * NR_THREADS) time and trigger a hard lockup. The series converts do_task_stat() to gather the threads/children statistics under sig->stats_lock, which runs locklessly in the common case, together with the prerequisite exit_code fix and the __for_each_thread() / thread_group_cputime_adjusted() cleanups.
Eric W. Biederman (1):
  [Backport] exit: Use the correct exit_code in /proc/<pid>/stat

Oleg Nesterov (3):
  [Backport] fs/proc: do_task_stat: use __for_each_thread()
  [Backport] fs/proc: do_task_stat: move thread_group_cputime_adjusted() outside of lock_task_sighand()
  [Backport] fs/proc: do_task_stat: use sig->stats_lock to gather the threads/children stats

 fs/proc/array.c | 69 +++++++++++++++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 28 deletions(-)
From: "Eric W. Biederman" ebiederm@xmission.com
mainline inclusion
from mainline-v5.17-rc1
commit 2d18f7f456209ed8a8fc138b8bc535dbdaf84695
category: bugfix
bugzilla: 189780
CVE: CVE-2024-26686
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Since do_task_stat() was modified to return process-wide values instead of per-task values, the exit_code calculation has never been updated. Update it now to return the process-wide exit_code when it is requested and available.
History-Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git
Fixes: bf719d26a5c1 ("[PATCH] distinct tgid/tid CPU usage")
Link: https://lkml.kernel.org/r/20220103213312.9144-4-ebiederm@xmission.com
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Zhao Wenhui <zhaowenhui8@huawei.com>
---
 fs/proc/array.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 989f7602035c..705028e535c5 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -462,6 +462,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
         u64 cgtime, gtime;
         unsigned long rsslim = 0;
         unsigned long flags;
+        int exit_code = task->exit_code;
 
         state = *get_task_state(task);
         vsize = eip = esp = 0;
@@ -525,6 +526,9 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                         maj_flt += sig->maj_flt;
                         thread_group_cputime_adjusted(task, &utime, &stime);
                         gtime += sig->gtime;
+
+                        if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
+                                exit_code = sig->group_exit_code;
                 }
 
                 sid = task_session_nr_ns(task, ns);
@@ -626,7 +630,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                 seq_puts(m, " 0 0 0 0 0 0 0");
 
         if (permitted)
-                seq_put_decimal_ll(m, " ", task->exit_code);
+                seq_put_decimal_ll(m, " ", exit_code);
         else
                 seq_puts(m, " 0");
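Not part of the patch itself: a minimal user-space sketch of where the value this hunk touches can be observed. It forks a child, lets it linger as a zombie, and prints the last field of its /proc/<pid>/stat line, which proc(5) documents as exit_code in the waitpid(2) encoding (so _exit(3) typically shows up as 768, i.e. 3 << 8). The file name and the one-second delay are arbitrary; for a single-threaded child the reported value is the same with or without this fix, which only matters for multi-threaded group exits.

/* stat_exit_code.c (illustrative sketch, not from the patch):
 * fork a child, let it become a zombie, then print the last field of
 * /proc/<pid>/stat, which proc(5) documents as exit_code in the
 * waitpid(2) encoding.
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        char path[64], buf[4096];
        pid_t pid = fork();
        FILE *f;

        if (pid < 0) {
                perror("fork");
                return 1;
        }
        if (pid == 0)
                _exit(3);                       /* child exits with status 3 */

        sleep(1);                               /* crude: wait for the child to become a zombie */

        snprintf(path, sizeof(path), "/proc/%d/stat", (int)pid);
        f = fopen(path, "r");
        if (f && fgets(buf, sizeof(buf), f)) {
                char *last;

                buf[strcspn(buf, "\n")] = '\0';
                last = strrchr(buf, ' ');       /* exit_code is the final field */
                printf("exit_code field: %s\n", last ? last + 1 : "?");
        }
        if (f)
                fclose(f);
        waitpid(pid, NULL, 0);                  /* reap the zombie */
        return 0;
}

With the patch applied, a thread-group leader reports sig->group_exit_code here once SIGNAL_GROUP_EXIT or SIGNAL_STOP_STOPPED is set, instead of its private task->exit_code.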
From: Oleg Nesterov <oleg@redhat.com>
mainline inclusion
from mainline-v6.7-rc1
commit 7904e53ed5a20fc678c01d5d1b07ec486425bb6a
category: bugfix
bugzilla: 189780
CVE: CVE-2024-26686
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
do/while_each_thread should be avoided when possible.
Link: https://lkml.kernel.org/r/20230909164501.GA11581@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Zhao Wenhui <zhaowenhui8@huawei.com>
---
 fs/proc/array.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 705028e535c5..de46cb910baa 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -515,12 +515,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
                 /* add up live thread stats at the group level */
                 if (whole) {
-                        struct task_struct *t = task;
-                        do {
+                        struct task_struct *t;
+
+                        __for_each_thread(sig, t) {
                                 min_flt += t->min_flt;
                                 maj_flt += t->maj_flt;
                                 gtime += task_gtime(t);
-                        } while_each_thread(task, t);
+                        }
 
                         min_flt += sig->min_flt;
                         maj_flt += sig->maj_flt;
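For context only, and assuming nothing beyond what proc(5) documents: a user-space analogue of the aggregation the loop above performs. It walks /proc/<pid>/task/<tid>/stat and sums the per-thread minflt/majflt counters (fields 10 and 12); unlike the kernel loop it cannot add sig->min_flt/maj_flt, the counters inherited from threads that already exited. The program name is made up.

/* thread_faults.c (illustrative sketch): sum minflt/majflt over all live
 * threads of a process, roughly what the "whole" loop above does inside
 * the kernel.  Usage: ./thread_faults <pid>
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
        unsigned long min_flt = 0, maj_flt = 0;
        char path[64], stat_path[128], buf[4096];
        struct dirent *de;
        DIR *dir;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                return 1;
        }

        snprintf(path, sizeof(path), "/proc/%s/task", argv[1]);
        dir = opendir(path);
        if (!dir) {
                perror(path);
                return 1;
        }

        while ((de = readdir(dir)) != NULL) {
                unsigned long t_min, t_maj;
                char *p;
                FILE *f;

                if (de->d_name[0] == '.')
                        continue;               /* skip "." and ".." */

                snprintf(stat_path, sizeof(stat_path), "%s/%s/stat", path, de->d_name);
                f = fopen(stat_path, "r");
                if (!f)
                        continue;               /* thread may have exited meanwhile */
                if (fgets(buf, sizeof(buf), f)) {
                        /* after "(comm)": state ppid pgrp sess tty tpgid flags
                         * minflt cminflt majflt ...  (field numbers per proc(5)) */
                        p = strrchr(buf, ')');
                        if (p && sscanf(p + 2, "%*s %*d %*d %*d %*d %*d %*u %lu %*u %lu",
                                        &t_min, &t_maj) == 2) {
                                min_flt += t_min;
                                maj_flt += t_maj;
                        }
                }
                fclose(f);
        }
        closedir(dir);

        printf("live threads: min_flt=%lu maj_flt=%lu\n", min_flt, maj_flt);
        return 0;
}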
From: Oleg Nesterov <oleg@redhat.com>
mainline inclusion
from mainline-v6.8-rc4
commit 60f92acb60a989b14e4b744501a0df0f82ef30a3
category: bugfix
bugzilla: 189780
CVE: CVE-2024-26686
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Patch series "fs/proc: do_task_stat: use sig->stats_lock...".
do_task_stat() has the same problem that getrusage() had before "getrusage: use sig->stats_lock rather than lock_task_sighand()": a hard lockup. If NR_CPUS threads call lock_task_sighand() at the same time and the target process has NR_THREADS threads, spin_lock_irq() will spin with irqs disabled for O(NR_CPUS * NR_THREADS) time.
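To put rough numbers on that (illustrative only, not taken from a report): with 128 CPUs contending on a process of 100,000 threads, the last CPU to get the lock waits while each earlier holder walks the full thread list in turn, on the order of 128 * 100,000 = 12.8 million thread iterations with interrupts disabled, which is ample time to trip the hard-lockup detector.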
This patch (of 3):
thread_group_cputime() does its own locking, so we can safely move thread_group_cputime_adjusted(), which does another for_each_thread loop, outside of the ->siglock-protected section.

Not only does this remove for_each_thread() from the critical section with irqs disabled, it also removes another case where stats_lock is taken with siglock held. We want to remove this dependency; then we can change the users of stats_lock to not disable irqs.
Link: https://lkml.kernel.org/r/20240123153313.GA21832@redhat.com
Link: https://lkml.kernel.org/r/20240123153355.GA21854@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Dylan Hatch <dylanbhatch@google.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Conflicts:
        fs/proc/array.c
Signed-off-by: Zhao Wenhui <zhaowenhui8@huawei.com>
---
 fs/proc/array.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index de46cb910baa..7eec7cc2f42c 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -490,7 +490,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
         sigemptyset(&sigign);
         sigemptyset(&sigcatch);
-        cutime = cstime = utime = stime = 0;
+        cutime = cstime = 0;
         cgtime = gtime = 0;
 
         if (lock_task_sighand(task, &flags)) {
@@ -525,7 +525,6 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
                         min_flt += sig->min_flt;
                         maj_flt += sig->maj_flt;
-                        thread_group_cputime_adjusted(task, &utime, &stime);
                         gtime += sig->gtime;
 
                         if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
@@ -541,10 +540,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
         if (permitted && (!whole || num_threads < 2))
                 wchan = get_wchan(task);
-        if (!whole) {
+
+        if (whole) {
+                thread_group_cputime_adjusted(task, &utime, &stime);
+        } else {
+                task_cputime_adjusted(task, &utime, &stime);
                 min_flt = task->min_flt;
                 maj_flt = task->maj_flt;
-                task_cputime_adjusted(task, &utime, &stime);
                 gtime = task_gtime(task);
         }
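A small usage note, not part of the patch: the utime/stime values that thread_group_cputime_adjusted() produces end up as fields 14 and 15 of /proc/<pid>/stat, counted in clock ticks. A throwaway reader (field numbers per proc(5); the file name is made up) that converts them to seconds:

/* cputime.c (illustrative sketch): print a process's whole-group
 * utime/stime from /proc/<pid>/stat fields 14 and 15, converted from
 * clock ticks to seconds.  Usage: ./cputime <pid>
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        unsigned long utime, stime;
        long hz = sysconf(_SC_CLK_TCK);
        char path[64], buf[4096], *p;
        FILE *f;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                return 1;
        }

        snprintf(path, sizeof(path), "/proc/%s/stat", argv[1]);
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }
        if (!fgets(buf, sizeof(buf), f)) {
                fclose(f);
                return 1;
        }
        fclose(f);

        /* skip "(comm)", then state ppid pgrp sess tty tpgid flags
         * minflt cminflt majflt cmajflt, then utime stime (proc(5)) */
        p = strrchr(buf, ')');
        if (!p || sscanf(p + 2, "%*s %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %lu %lu",
                         &utime, &stime) != 2) {
                fprintf(stderr, "unexpected stat format\n");
                return 1;
        }

        printf("utime=%.2fs stime=%.2fs\n", (double)utime / hz, (double)stime / hz);
        return 0;
}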
From: Oleg Nesterov <oleg@redhat.com>
mainline inclusion
from mainline-v6.8-rc4
commit 7601df8031fd67310af891897ef6cc0df4209305
category: bugfix
bugzilla: 189780
CVE: CVE-2024-26686
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
lock_task_sighand() can trigger a hard lockup. If NR_CPUS threads call do_task_stat() at the same time and the target process has NR_THREADS threads, it will spin with irqs disabled for O(NR_CPUS * NR_THREADS) time.

Change do_task_stat() to use sig->stats_lock to gather the statistics outside of the ->siglock-protected section; in the likely case, this code will run locklessly.
Link: https://lkml.kernel.org/r/20240123153357.GA21857@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Dylan Hatch <dylanbhatch@google.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Zhao Wenhui <zhaowenhui8@huawei.com>
---
 fs/proc/array.c | 58 +++++++++++++++++++++++++++----------------------
 1 file changed, 32 insertions(+), 26 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 7eec7cc2f42c..d5fed9281863 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -456,13 +456,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
         int permitted;
         struct mm_struct *mm;
         unsigned long long start_time;
-        unsigned long cmin_flt = 0, cmaj_flt = 0;
-        unsigned long min_flt = 0, maj_flt = 0;
-        u64 cutime, cstime, utime, stime;
-        u64 cgtime, gtime;
+        unsigned long cmin_flt, cmaj_flt, min_flt, maj_flt;
+        u64 cutime, cstime, cgtime, utime, stime, gtime;
         unsigned long rsslim = 0;
         unsigned long flags;
         int exit_code = task->exit_code;
+        struct signal_struct *sig = task->signal;
+        unsigned int seq = 1;
 
         state = *get_task_state(task);
         vsize = eip = esp = 0;
@@ -490,12 +490,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
         sigemptyset(&sigign);
         sigemptyset(&sigcatch);
-        cutime = cstime = 0;
-        cgtime = gtime = 0;
 
         if (lock_task_sighand(task, &flags)) {
-                struct signal_struct *sig = task->signal;
-
                 if (sig->tty) {
                         struct pid *pgrp = tty_get_pgrp(sig->tty);
                         tty_pgrp = pid_nr_ns(pgrp, ns);
@@ -506,27 +502,9 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                 num_threads = get_nr_threads(task);
                 collect_sigign_sigcatch(task, &sigign, &sigcatch);
 
-                cmin_flt = sig->cmin_flt;
-                cmaj_flt = sig->cmaj_flt;
-                cutime = sig->cutime;
-                cstime = sig->cstime;
-                cgtime = sig->cgtime;
                 rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
 
-                /* add up live thread stats at the group level */
                 if (whole) {
-                        struct task_struct *t;
-
-                        __for_each_thread(sig, t) {
-                                min_flt += t->min_flt;
-                                maj_flt += t->maj_flt;
-                                gtime += task_gtime(t);
-                        }
-
-                        min_flt += sig->min_flt;
-                        maj_flt += sig->maj_flt;
-                        gtime += sig->gtime;
-
                         if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
                                 exit_code = sig->group_exit_code;
                 }
@@ -541,6 +519,34 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
         if (permitted && (!whole || num_threads < 2))
                 wchan = get_wchan(task);
 
+        do {
+                seq++; /* 2 on the 1st/lockless path, otherwise odd */
+                flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
+
+                cmin_flt = sig->cmin_flt;
+                cmaj_flt = sig->cmaj_flt;
+                cutime = sig->cutime;
+                cstime = sig->cstime;
+                cgtime = sig->cgtime;
+
+                if (whole) {
+                        struct task_struct *t;
+
+                        min_flt = sig->min_flt;
+                        maj_flt = sig->maj_flt;
+                        gtime = sig->gtime;
+
+                        rcu_read_lock();
+                        __for_each_thread(sig, t) {
+                                min_flt += t->min_flt;
+                                maj_flt += t->maj_flt;
+                                gtime += task_gtime(t);
+                        }
+                        rcu_read_unlock();
+                }
+        } while (need_seqretry(&sig->stats_lock, seq));
+        done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
+
         if (whole) {
                 thread_group_cputime_adjusted(task, &utime, &stime);
         } else {
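For readers unfamiliar with the retry pattern used above: the following is a simplified user-space analogue of what read_seqbegin_or_lock_irqsave()/need_seqretry() give the kernel, a lockless read side that only loops when it races with a writer. The structure and function names are invented for illustration, and the memory-ordering recipe is the standard C11 seqlock-reader pattern, not the kernel's seqlock_t implementation.

/* seqretry_demo.c (illustrative sketch): single-writer seqcount with
 * C11 atomics.  The counter is odd while an update is in progress;
 * readers snapshot the data and retry if the counter was odd or
 * changed underneath them.  Build: gcc -std=c11 seqretry_demo.c
 */
#include <stdatomic.h>
#include <stdio.h>

struct group_stats {
        atomic_uint seq;                /* odd => update in flight */
        atomic_ulong min_flt;
        atomic_ulong maj_flt;
};

/* Writer side: assumed to be serialized externally (one writer at a time). */
static void stats_add(struct group_stats *s, unsigned long dmin, unsigned long dmaj)
{
        unsigned int seq = atomic_load_explicit(&s->seq, memory_order_relaxed);

        atomic_store_explicit(&s->seq, seq + 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* counter goes odd before data changes */

        atomic_store_explicit(&s->min_flt,
                atomic_load_explicit(&s->min_flt, memory_order_relaxed) + dmin,
                memory_order_relaxed);
        atomic_store_explicit(&s->maj_flt,
                atomic_load_explicit(&s->maj_flt, memory_order_relaxed) + dmaj,
                memory_order_relaxed);

        atomic_store_explicit(&s->seq, seq + 2, memory_order_release); /* back to even */
}

/* Reader side: lockless, loops only if it raced with the writer. */
static void stats_read(struct group_stats *s, unsigned long *min, unsigned long *maj)
{
        unsigned int start, end;

        do {
                start = atomic_load_explicit(&s->seq, memory_order_acquire);
                *min = atomic_load_explicit(&s->min_flt, memory_order_relaxed);
                *maj = atomic_load_explicit(&s->maj_flt, memory_order_relaxed);
                atomic_thread_fence(memory_order_acquire);
                end = atomic_load_explicit(&s->seq, memory_order_relaxed);
        } while ((start & 1) || start != end);
}

int main(void)
{
        static struct group_stats s;    /* zero-initialized */
        unsigned long min, maj;

        stats_add(&s, 3, 1);
        stats_read(&s, &min, &maj);
        printf("min_flt=%lu maj_flt=%lu\n", min, maj);
        return 0;
}

In the kernel version the second pass falls back to taking sig->stats_lock exclusively (seq becomes odd, so read_seqbegin_or_lock_irqsave() locks), which bounds how long a reader can keep retrying.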