hulk inclusion
category: bugfix
bugzilla: 51349
CVE: NA
-------------------------------------------------
This patchset https://patchwork.kernel.org/project/linux-block/cover/20190826111627.7505-1... causes a performance regression, so revert it and fix the warning introduced by the fix for CVE-2021-27365 in another way.
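For clarity, the large-allocation path that results from this revert is sketched below. This is a reconstruction from the mm/slab_common.c hunk in this patch, not a verbatim copy of the file; the kmemleak/kasan calls are the ones already present in kmalloc_order().

    /* Sketch: mm/slab_common.c kmalloc_order() after this revert. */
    void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
    {
    	void *ret;
    	struct page *page;

    	flags |= __GFP_COMP;
    	page = alloc_pages(flags, order);
    	/* No per-node NR_SLAB_UNRECLAIMABLE update on this path any more. */
    	ret = page ? page_address(page) : NULL;
    	kmemleak_alloc(ret, size, 1, flags);
    	kasan_kmalloc_large(ret, size, flags);
    	return ret;
    }

The same pattern applies to kmalloc_large_node() in mm/slub.c and to the slob paths in the diff: the page-allocator calls stay, and only the mod_node_page_state() accounting is dropped.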
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/slab_common.c |  8 ++------
 mm/slob.c        | 20 ++++----------------
 mm/slub.c        | 14 +++-----------
 3 files changed, 9 insertions(+), 33 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 321a9abed5d9d..6b1cbf89a6861 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1252,16 +1252,12 @@ void __init create_kmalloc_caches(slab_flags_t flags)
  */
 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-	void *ret = NULL;
+	void *ret;
 	struct page *page;
 
 	flags |= __GFP_COMP;
 	page = alloc_pages(flags, order);
-	if (likely(page)) {
-		ret = page_address(page);
-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-				    1 << order);
-	}
+	ret = page ? page_address(page) : NULL;
 	kmemleak_alloc(ret, size, 1, flags);
 	kasan_kmalloc_large(ret, size, flags);
 	return ret;
diff --git a/mm/slob.c b/mm/slob.c
index 8165d90db1adc..fdf284009be92 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -190,7 +190,7 @@ static int slob_last(slob_t *s)
 
 static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
-	struct page *page;
+	void *page;
 
 #ifdef CONFIG_NUMA
 	if (node != NUMA_NO_NODE)
@@ -202,21 +202,14 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 	if (!page)
 		return NULL;
 
-	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-			    1 << order);
 	return page_address(page);
 }
 
 static void slob_free_pages(void *b, int order)
 {
-	struct page *sp = virt_to_page(b);
-
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
-
-	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
-			    -(1 << order));
-	__free_pages(sp, order);
+	free_pages((unsigned long)b, order);
 }
 
 /*
@@ -524,13 +517,8 @@ void kfree(const void *block)
 		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
-	} else {
-		unsigned int order = compound_order(sp);
-		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
-				    -(1 << order));
-		__free_pages(sp, order);
-
-	}
+	} else
+		__free_pages(sp, compound_order(sp));
 }
 EXPORT_SYMBOL(kfree);
 
diff --git a/mm/slub.c b/mm/slub.c
index 0d69d5b3ceefe..af7343a744091 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3793,15 +3793,11 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
 	void *ptr = NULL;
-	unsigned int order = get_order(size);
 
 	flags |= __GFP_COMP;
-	page = alloc_pages_node(node, flags, order);
-	if (page) {
+	page = alloc_pages_node(node, flags, get_order(size));
+	if (page)
 		ptr = page_address(page);
-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-				    1 << order);
-	}
 
 	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
@@ -3936,13 +3932,9 @@ void kfree(const void *x)
 
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
-		unsigned int order = compound_order(page);
-
 		BUG_ON(!PageCompound(page));
 		kfree_hook(object);
-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-				    -(1 << order));
-		__free_pages(page, order);
+		__free_pages(page, compound_order(page));
 		return;
 	}
 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);