From: "Tobin C. Harding" <tobin@kernel.org>
mainline inclusion from mainline-5.2-rc1 commit 6dfd1b653c49df2dad1dcfe063a196e940e02dbd category: bugfix bugzilla: 34611 CVE: NA
------------------------------------------------- SLUB allocator makes heavy use of ifdef/endif pre-processor macros. The pairing of these statements is at times hard to follow e.g. if the pair are further than a screen apart or if there are nested pairs. We can reduce cognitive load by adding a comment to the endif statement of the form
#ifdef CONFIG_FOO ... #endif /* CONFIG_FOO */
Add comments to endif pre-processor macros if ifdef/endif pair is not immediately apparent.
Link: http://lkml.kernel.org/r/20190402230545.2929-5-tobin@kernel.org Signed-off-by: Tobin C. Harding <tobin@kernel.org> Acked-by: Christoph Lameter <cl@linux.com> Reviewed-by: Roman Gushchin <guro@fb.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Pekka Enberg <penberg@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> (cherry picked from commit 6dfd1b653c49df2dad1dcfe063a196e940e02dbd) Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> Signed-off-by: Liu Shixin <liushixin2@huawei.com> Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com> Signed-off-by: Yang Yingliang <yangyingliang@huawei.com> --- mm/slub.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c index 9fedcaf69ebf..32bec9c3485e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1909,7 +1909,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags, } } } while (read_mems_allowed_retry(cpuset_mems_cookie)); -#endif +#endif /* CONFIG_NUMA */ return NULL; }
@@ -2220,7 +2220,7 @@ static void unfreeze_partials(struct kmem_cache *s, discard_slab(s, page); stat(s, FREE_SLAB); } -#endif +#endif /* CONFIG_SLUB_CPU_PARTIAL */ }
/* @@ -2279,7 +2279,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) local_irq_restore(flags); } preempt_enable(); -#endif +#endif /* CONFIG_SLUB_CPU_PARTIAL */ }
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) @@ -2797,7 +2797,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s, } EXPORT_SYMBOL(kmem_cache_alloc_node_trace); #endif -#endif +#endif /* CONFIG_NUMA */
/* * Slow path handling. This may still be called frequently since objects @@ -3842,7 +3842,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) return ret; } EXPORT_SYMBOL(__kmalloc_node); -#endif +#endif /* CONFIG_NUMA */
#ifdef CONFIG_HARDENED_USERCOPY /* @@ -4058,7 +4058,7 @@ void __kmemcg_cache_deactivate(struct kmem_cache *s) */ slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu); } -#endif +#endif /* CONFIG_MEMCG */
static int slab_mem_going_offline_callback(void *arg) { @@ -4695,7 +4695,7 @@ static int list_locations(struct kmem_cache *s, char *buf, len += sprintf(buf, "No data\n"); return len; } -#endif +#endif /* CONFIG_SLUB_DEBUG */
#ifdef SLUB_RESILIENCY_TEST static void __init resiliency_test(void) @@ -4755,7 +4755,7 @@ static void __init resiliency_test(void) #ifdef CONFIG_SYSFS static void resiliency_test(void) {}; #endif -#endif +#endif /* SLUB_RESILIENCY_TEST */
#ifdef CONFIG_SYSFS enum slab_stat_type { @@ -5421,7 +5421,7 @@ STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); -#endif +#endif /* CONFIG_SLUB_STATS */
static struct attribute *slab_attrs[] = { &slab_size_attr.attr, @@ -5623,7 +5623,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
if (buffer) free_page((unsigned long)buffer); -#endif +#endif /* CONFIG_MEMCG */ }
static void kmem_cache_release(struct kobject *k)