hulk inclusion
category: bugfix
bugzilla: 51349
CVE: NA
-------------------------------------------------
The patchset https://patchwork.kernel.org/project/linux-block/cover/20190826111627.7505-1... causes a performance regression, so revert it and use another way to fix the warning introduced by the fix for CVE-2021-27365.
Signed-off-by: Yang Yingliang yangyingliang@huawei.com
Reviewed-by: Kefeng Wang wangkefeng.wang@huawei.com
---
 mm/slab_common.c | 4 ++--
 mm/slub.c        | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c index b8b0df81bece3..321a9abed5d9d 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1259,8 +1259,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) page = alloc_pages(flags, order); if (likely(page)) { ret = page_address(page); - mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, - PAGE_SIZE << order); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, + 1 << order); } kmemleak_alloc(ret, size, 1, flags); kasan_kmalloc_large(ret, size, flags); diff --git a/mm/slub.c b/mm/slub.c index 12f23ceab1177..0d69d5b3ceefe 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3799,8 +3799,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) page = alloc_pages_node(node, flags, order); if (page) { ptr = page_address(page); - mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, - PAGE_SIZE << order); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, + 1 << order); }
kmalloc_large_node_hook(ptr, size, flags); @@ -3940,8 +3940,8 @@ void kfree(const void *x)
BUG_ON(!PageCompound(page)); kfree_hook(object); - mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, - -(PAGE_SIZE << order)); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, + -(1 << order)); __free_pages(page, order); return; }