hulk inclusion
category: bugfix
bugzilla: 51349
CVE: NA
-------------------------------------------------
This patchset https://patchwork.kernel.org/project/linux-block/cover/20190826111627.7505-1... causes a performance regression, so revert it and fix the warning introduced by the fix for CVE-2021-27365 in another way.
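For reference, the behaviour being reverted is the natural-alignment guarantee for power-of-two kmalloc() sizes; after the revert, SLOB again guarantees only max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN). The userspace sketch below is an illustration only: the minalign values of 8 and the helper names are assumptions for the example (the real values are architecture dependent), and it merely mirrors the two alignment policies of the SLOB kmalloc path touched by this patch.

/*
 * Illustration only (not kernel code): a userspace sketch of the alignment
 * policy in SLOB's __do_kmalloc_node().  The minalign values below are
 * assumptions for the example; the real ones are architecture dependent.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ARCH_KMALLOC_MINALIGN	8	/* assumed value */
#define ARCH_SLAB_MINALIGN	8	/* assumed value */

static bool is_power_of_2(size_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static size_t max_sz(size_t a, size_t b)
{
	return a > b ? a : b;
}

/* Alignment with the reverted patchset: natural alignment for 2^n sizes. */
static size_t align_with_patchset(size_t size)
{
	size_t minalign = max_sz(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);

	return is_power_of_2(size) ? max_sz(minalign, size) : minalign;
}

/* Alignment after this revert: only the minimum architectural alignment. */
static size_t align_after_revert(size_t size)
{
	(void)size;
	return max_sz(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
}

int main(void)
{
	size_t sizes[] = { 8, 96, 128, 256, 1024 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4zu: with patchset %4zu, after revert %zu\n",
		       sizes[i], align_with_patchset(sizes[i]),
		       align_after_revert(sizes[i]));
	return 0;
}

The sketch only summarises the alignment policy; the actual change is the revert in __do_kmalloc_node()/slob_alloc() below.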
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/slab.h |  4 ----
 mm/slab_common.c     | 11 +----------
 mm/slob.c            | 41 +++++++++++------------------------------
 3 files changed, 12 insertions(+), 44 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5785b836e9fd7..788f04a7ca766 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -492,10 +492,6 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  * kmalloc is the normal method of allocating memory
  * for objects smaller than page size in the kernel.
  *
- * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
- * bytes. For @size of power of two bytes, the alignment is also guaranteed
- * to be at least to the size.
- *
  * The @flags argument may be one of:
  *
  * %GFP_USER - Allocate memory on behalf of user. May sleep.
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6b1cbf89a6861..d208b47e01a8e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -999,19 +999,10 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
 		unsigned int useroffset, unsigned int usersize)
 {
 	int err;
-	unsigned int align = ARCH_KMALLOC_MINALIGN;
 
 	s->name = name;
 	s->size = s->object_size = size;
-
-	/*
-	 * For power of two sizes, guarantee natural alignment for kmalloc
-	 * caches, regardless of SL*B debugging options.
-	 */
-	if (is_power_of_2(size))
-		align = max(align, size);
-	s->align = calculate_alignment(flags, align, size);
-
+	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
 	s->useroffset = useroffset;
 	s->usersize = usersize;
 
diff --git a/mm/slob.c b/mm/slob.c
index fdf284009be92..307c2c9feb441 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -215,7 +215,7 @@ static void slob_free_pages(void *b, int order)
 /*
  * Allocate a slob block within a given slob_page sp.
  */
-static void *slob_page_alloc(struct page *sp, size_t size, int align, int align_offset)
+static void *slob_page_alloc(struct page *sp, size_t size, int align)
 {
 	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
@@ -223,17 +223,8 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align, int align_
 	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
 		slobidx_t avail = slob_units(cur);
 
-		/*
-		 * 'aligned' will hold the address of the slob block so that the
-		 * address 'aligned'+'align_offset' is aligned according to the
-		 * 'align' parameter. This is for kmalloc() which prepends the
-		 * allocated block with its size, so that the block itself is
-		 * aligned when needed.
-		 */
 		if (align) {
-			aligned = (slob_t *)
-				(ALIGN((unsigned long)cur + align_offset, align)
-				 - align_offset);
+			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
 			delta = aligned - cur;
 		}
 		if (avail >= units + delta) { /* room enough? */
@@ -275,8 +266,7 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align, int align_
 /*
  * slob_alloc: entry point into the slob allocator.
  */
-static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
-							int align_offset)
+static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
 	struct page *sp;
 	struct list_head *prev;
@@ -308,7 +298,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 
 		/* Attempt to alloc */
 		prev = sp->lru.prev;
-		b = slob_page_alloc(sp, size, align, align_offset);
+		b = slob_page_alloc(sp, size, align);
 		if (!b)
 			continue;
 
@@ -336,7 +326,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		INIT_LIST_HEAD(&sp->lru);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
-		b = slob_page_alloc(sp, size, align, align_offset);
+		b = slob_page_alloc(sp, size, align);
 		BUG_ON(!b);
 		spin_unlock_irqrestore(&slob_lock, flags);
 	}
@@ -438,7 +428,7 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
-	int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	void *ret;
 
 	gfp &= gfp_allowed_mask;
@@ -446,28 +436,19 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 	fs_reclaim_acquire(gfp);
 	fs_reclaim_release(gfp);
 
-	if (size < PAGE_SIZE - minalign) {
-		int align = minalign;
-
-		/*
-		 * For power of two sizes, guarantee natural alignment for
-		 * kmalloc()'d objects.
-		 */
-		if (is_power_of_2(size))
-			align = max(minalign, (int) size);
-
+	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
-		m = slob_alloc(size + minalign, gfp, align, node, minalign);
+		m = slob_alloc(size + align, gfp, align, node);
 
 		if (!m)
 			return NULL;
 		*m = size;
-		ret = (void *)m + minalign;
+		ret = (void *)m + align;
 
 		trace_kmalloc_node(caller, ret,
-				   size, size + minalign, gfp, node);
+				   size, size + align, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
 
@@ -563,7 +544,7 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	fs_reclaim_release(flags);
 
 	if (c->size < PAGE_SIZE) {
-		b = slob_alloc(c->size, flags, c->align, node, 0);
+		b = slob_alloc(c->size, flags, c->align, node);
 		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
 					    SLOB_UNITS(c->size) * SLOB_UNIT,
 					    flags, node);