From: Shakeel Butt <shakeelb@google.com>
mainline inclusion
from mainline-v5.3-rc1
commit 5eee7e1cdb97123bb55ac14ccd3af8b6edc31537
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6E9D3
CVE: NA
--------------------------------
dump_tasks() traverses all the existing processes even for the memcg OOM context, which is not only unnecessary but also wasteful. This imposes a long RCU critical section even from a contained context, which can be quite disruptive.

Change dump_tasks() to be aligned with select_bad_process() and use mem_cgroup_scan_tasks() to selectively traverse only the processes of the target memcg hierarchy during a memcg OOM.
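In outline, the reworked dump_tasks() mirrors select_bad_process(): a per-task dump_task() callback is driven by mem_cgroup_scan_tasks() for memcg OOMs, and by the old for_each_process() walk for global OOMs. A simplified sketch of the resulting logic (the share-pool columns printed under ascend_sp_oom_show() in this backport are omitted here):

static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p, NULL, oc->nodemask))
		return 0;

	task = find_lock_task_mm(p);
	if (!task)	/* kthread, or all threads already detached their mm */
		return 0;

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}

static void dump_tasks(struct oom_control *oc)
{
	/* header pr_info() lines elided */
	if (is_memcg_oom(oc))
		/* memcg OOM: walk only the target memcg hierarchy */
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		/* global OOM: keep the system-wide RCU walk */
		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}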
Link: http://lkml.kernel.org/r/20190617231207.160865-1-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Paul Jackson <pj@sgi.com>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Conflicts:
	mm/oom_kill.c

Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Reviewed-by: Nanyong Sun <sunnanyong@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 mm/oom_kill.c | 121 +++++++++++++++++++++++++++++---------------------
 1 file changed, 70 insertions(+), 51 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 020fb6ac2497..41fcb8e45319 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -440,10 +440,60 @@ static void select_bad_process(struct oom_control *oc)
 	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
 }
 
+static int dump_task(struct task_struct *p, void *arg)
+{
+	struct oom_control *oc = arg;
+	struct task_struct *task;
+	struct sp_proc_stat *stat;
+
+	if (oom_unkillable_task(p, NULL, oc->nodemask))
+		return 0;
+
+	task = find_lock_task_mm(p);
+	if (!task) {
+		/*
+		 * This is a kthread or all of p's threads have already
+		 * detached their mm's. There's no need to report
+		 * them; they can't be oom killed anyway.
+		 */
+		return 0;
+	}
+
+	if (ascend_sp_oom_show()) {
+		stat = sp_get_proc_stat_ref(task->mm);
+
+		pr_cont("[%7d] %5d %5d %8lu %8lu ",
+			task->pid, from_kuid(&init_user_ns, task_uid(task)),
+			task->tgid, task->mm->total_vm, get_mm_rss(task->mm));
+		if (!stat)
+			pr_cont("%-9c %-9c ", '-', '-');
+		else {
+			pr_cont("%-9lld %-9lld ", /* byte to KB */
+				(long long)atomic64_read(&stat->alloc_size) >> 10,
+				(long long)atomic64_read(&stat->k2u_size) >> 10);
+			sp_proc_stat_drop(stat);
+		}
+		pr_cont("%8ld %8lu %5hd %s\n",
+			mm_pgtables_bytes(task->mm),
+			get_mm_counter(task->mm, MM_SWAPENTS),
+			task->signal->oom_score_adj, task->comm);
+	} else {
+		pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
+			task->pid, from_kuid(&init_user_ns, task_uid(task)),
+			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
+			mm_pgtables_bytes(task->mm),
+			get_mm_counter(task->mm, MM_SWAPENTS),
+			task->signal->oom_score_adj, task->comm);
+	}
+
+	task_unlock(task);
+
+	return 0;
+}
+
 /**
  * dump_tasks - dump current memory state of all system tasks
- * @memcg: current's memory controller, if constrained
- * @nodemask: nodemask passed to page allocator for mempolicy ooms
+ * @oc: pointer to struct oom_control
  *
  * Dumps the current memory state of all eligible tasks. Tasks not in the same
  * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
@@ -451,12 +501,8 @@ static void select_bad_process(struct oom_control *oc)
  * State information includes task's pid, uid, tgid, vm size, rss,
  * pgtables_bytes, swapents, oom_score_adj value, and name.
  */
-static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
+static void dump_tasks(struct oom_control *oc)
 {
-	struct task_struct *p;
-	struct task_struct *task;
-	struct sp_proc_stat *stat;
-
 	if (ascend_sp_oom_show()) {
 		pr_info("Tasks state (memory values in pages, share pool memory values in KB):\n");
 		pr_info("[ pid ] uid tgid total_vm rss sp_alloc sp_k2u pgtables_bytes swapents oom_score_adj name\n");
@@ -465,50 +511,16 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
 		pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
 	}
-
-	rcu_read_lock();
-	for_each_process(p) {
-		if (oom_unkillable_task(p, memcg, nodemask))
-			continue;
-
-		task = find_lock_task_mm(p);
-		if (!task) {
-			/*
-			 * This is a kthread or all of p's threads have already
-			 * detached their mm's. There's no need to report
-			 * them; they can't be oom killed anyway.
-			 */
-			continue;
-		}
+	if (is_memcg_oom(oc))
+		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
+	else {
+		struct task_struct *p;
 
-		if (ascend_sp_oom_show()) {
-			stat = sp_get_proc_stat_ref(task->mm);
-
-			pr_cont("[%7d] %5d %5d %8lu %8lu ",
-				task->pid, from_kuid(&init_user_ns, task_uid(task)),
-				task->tgid, task->mm->total_vm, get_mm_rss(task->mm));
-			if (!stat)
-				pr_cont("%-9c %-9c ", '-', '-');
-			else {
-				pr_cont("%-9lld %-9lld ", /* byte to KB */
-					(long long)atomic64_read(&stat->alloc_size) >> 10,
-					(long long)atomic64_read(&stat->k2u_size) >> 10);
-				sp_proc_stat_drop(stat);
-			}
-			pr_cont("%8ld %8lu %5hd %s\n",
-				mm_pgtables_bytes(task->mm),
-				get_mm_counter(task->mm, MM_SWAPENTS),
-				task->signal->oom_score_adj, task->comm);
-		} else {
-			pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
-				task->pid, from_kuid(&init_user_ns, task_uid(task)),
-				task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
-				mm_pgtables_bytes(task->mm),
-				get_mm_counter(task->mm, MM_SWAPENTS),
-				task->signal->oom_score_adj, task->comm);
-		}
-		task_unlock(task);
+
+		rcu_read_lock();
+		for_each_process(p)
+			dump_task(p, oc);
+		rcu_read_unlock();
 	}
-	rcu_read_unlock();
 }
 
 static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
@@ -540,7 +552,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
 		dump_unreclaimable_slab();
 	}
 	if (sysctl_oom_dump_tasks)
-		dump_tasks(oc->memcg, oc->nodemask);
+		dump_tasks(oc);
 	if (p)
 		dump_oom_summary(oc, p);
 }
@@ -1158,6 +1170,13 @@ int hisi_oom_notifier_call(unsigned long val, void *v)
 {
 	int ret;
 	unsigned long freed = 0;
+	struct oom_control oc = {
+		.zonelist = NULL,
+		.nodemask = NULL,
+		.memcg = NULL,
+		.gfp_mask = GFP_KERNEL,
+		.order = 0,
+	};
 
 	/* when enable oom killer, just return */
 	if (sysctl_enable_oom_killer == 1)
@@ -1171,7 +1190,7 @@ int hisi_oom_notifier_call(unsigned long val, void *v)
 		show_mem(SHOW_MEM_FILTER_NODES, NULL);
 		spg_overview_show(NULL);
 		spa_overview_show(NULL);
-		dump_tasks(NULL, 0);
+		dump_tasks(&oc);
 		last_jiffies = jiffies;
 	}