From: Roman Gushchin <guro@fb.com>
mainline inclusion
from mainline-5.3-rc1
commit 6cea1d569d24af6f9e95f70cb301807440ae2981
category: bugfix
bugzilla: 34611
CVE: NA
-------------------------------------------------
Currently the page accounting code is duplicated in the SLAB and SLUB
internals. Let's move it into new charge_slab_page() and
uncharge_slab_page() helpers in mm/slab.h. These helpers are responsible
for statistics (global and memcg-aware) and for memcg charging, so they
replace the direct memcg_(un)charge_slab() calls.
Link: http://lkml.kernel.org/r/20190611231813.3148843-6-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Waiman Long <longman@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Andrei Vagin <avagin@gmail.com>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 6cea1d569d24af6f9e95f70cb301807440ae2981)
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Conflicts:
	mm/slab.h
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/slab.c | 19 +++----------------
 mm/slab.h | 25 +++++++++++++++++++++++++
 mm/slub.c | 14 ++------------
 3 files changed, 30 insertions(+), 28 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a04e81dbbcdb..a818297ef524 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1405,7 +1405,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 								int nodeid)
 {
 	struct page *page;
-	int nr_pages;
 
 	flags |= cachep->allocflags;
 
@@ -1415,17 +1414,11 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 		return NULL;
 	}
 
-	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
+	if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
 		__free_pages(page, cachep->gfporder);
 		return NULL;
 	}
 
-	nr_pages = (1 << cachep->gfporder);
-	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
-	else
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
-
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1440,12 +1433,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
 	int order = cachep->gfporder;
-	unsigned long nr_freed = (1 << order);
-
-	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
-	else
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
 
 	BUG_ON(!PageSlab(page));
 	__ClearPageSlabPfmemalloc(page);
@@ -1454,8 +1441,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	page->mapping = NULL;
 
 	if (current->reclaim_state)
-		current->reclaim_state->reclaimed_slab += nr_freed;
-	memcg_uncharge_slab(page, order, cachep);
+		current->reclaim_state->reclaimed_slab += 1 << order;
+	uncharge_slab_page(page, order, cachep);
 	__free_pages(page, order);
 }
 
diff --git a/mm/slab.h b/mm/slab.h
index 0875314b1210..47dc964e9f9d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -205,6 +205,12 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
+static inline int cache_vmstat_idx(struct kmem_cache *s)
+{
+	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
 
 /* List of all root caches. */
@@ -350,6 +356,25 @@ static inline void memcg_link_cache(struct kmem_cache *s,
 
 #endif /* CONFIG_MEMCG_KMEM */
 
+static __always_inline int charge_slab_page(struct page *page,
+					    gfp_t gfp, int order,
+					    struct kmem_cache *s)
+{
+	int ret = memcg_charge_slab(page, gfp, order, s);
+
+	if (!ret)
+		mod_lruvec_page_state(page, cache_vmstat_idx(s), 1 << order);
+
+	return ret;
+}
+
+static __always_inline void uncharge_slab_page(struct page *page, int order,
+					       struct kmem_cache *s)
+{
+	mod_lruvec_page_state(page, cache_vmstat_idx(s), -(1 << order));
+	memcg_uncharge_slab(page, order, s);
+}
+
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
 	struct kmem_cache *cachep;
diff --git a/mm/slub.c b/mm/slub.c
index 4104c266580a..1e378b4a6942 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1457,7 +1457,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 	else
 		page = __alloc_pages_node(node, flags, order);
 
-	if (page && memcg_charge_slab(page, flags, order, s)) {
+	if (page && charge_slab_page(page, flags, order, s)) {
 		__free_pages(page, order);
 		page = NULL;
 	}
@@ -1649,11 +1649,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (!page)
 		return NULL;
 
-	mod_lruvec_page_state(page,
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 
 	return page;
@@ -1687,18 +1682,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 			check_object(s, page, p, SLUB_RED_INACTIVE);
 	}
 
-	mod_lruvec_page_state(page,
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		-pages);
-
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
 
 	page->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	memcg_uncharge_slab(page, order, s);
+	uncharge_slab_page(page, order, s);
 	__free_pages(page, order);
 }
 