tree:   https://gitee.com/openeuler/kernel.git OLK-5.10
head:   cbfb00d89c3b9971c3f0732b4f34d4e674bff5d8
commit: e037ee4a8deaff7c579618c0aba1f066d6d14b11 [3775/30000] mm, page_alloc: disable pcplists during memory offline
config: x86_64-randconfig-123-20240910 (https://download.01.org/0day-ci/archive/20240911/202409110302.AhDyE0gr-lkp@i...)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240911/202409110302.AhDyE0gr-lkp@i...)
If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202409110302.AhDyE0gr-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> mm/page_alloc.c:3036:6: sparse: sparse: symbol '__drain_all_pages' was not declared. Should it be static?
>> mm/page_alloc.c:6634:6: sparse: sparse: symbol '__zone_set_pageset_high_and_batch' was not declared. Should it be static?
   mm/page_alloc.c: note: in included file (through include/linux/mm.h):
   include/linux/gfp.h:327:27: sparse: sparse: restricted gfp_t degrades to integer
   include/linux/gfp.h:327:27: sparse: sparse: restricted gfp_t degrades to integer
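Both new warnings mean the functions are defined with external linkage in mm/page_alloc.c without any prior declaration in scope. Sparse's suggestion (mark them static) is only right if page_alloc.c is their sole user; if the memory-offline path needs to call them, the usual alternative is a declaration in mm/internal.h. A minimal sketch of the latter, reusing the __drain_all_pages() prototype visible in the excerpt below (the placement in mm/internal.h, and the exact prototype needed for __zone_set_pageset_high_and_batch(), are assumptions rather than something taken from this tree):

    /* mm/internal.h -- sketch only, not a patch against this tree */
    void __drain_all_pages(struct zone *zone, bool force_all_cpus);
    /*
     * A matching prototype for __zone_set_pageset_high_and_batch() would be
     * copied from its definition at mm/page_alloc.c:6634.
     */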
vim +/__drain_all_pages +3036 mm/page_alloc.c
  3025	
  3026	/*
  3027	 * The implementation of drain_all_pages(), exposing an extra parameter to
  3028	 * drain on all cpus.
  3029	 *
  3030	 * drain_all_pages() is optimized to only execute on cpus where pcplists are
  3031	 * not empty. The check for non-emptiness can however race with a free to
  3032	 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
  3033	 * that need the guarantee that every CPU has drained can disable the
  3034	 * optimizing racy check.
  3035	 */
> 3036	void __drain_all_pages(struct zone *zone, bool force_all_cpus)
  3037	{
  3038		int cpu;
  3039	
  3040		/*
  3041		 * Allocate in the BSS so we wont require allocation in
  3042		 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
  3043		 */
  3044		static cpumask_t cpus_with_pcps;
  3045	
  3046		/*
  3047		 * Make sure nobody triggers this path before mm_percpu_wq is fully
  3048		 * initialized.
  3049		 */
  3050		if (WARN_ON_ONCE(!mm_percpu_wq))
  3051			return;
  3052	
  3053		/*
  3054		 * Do not drain if one is already in progress unless it's specific to
  3055		 * a zone. Such callers are primarily CMA and memory hotplug and need
  3056		 * the drain to be complete when the call returns.
  3057		 */
  3058		if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
  3059			if (!zone)
  3060				return;
  3061			mutex_lock(&pcpu_drain_mutex);
  3062		}
  3063	
  3064		/*
  3065		 * We don't care about racing with CPU hotplug event
  3066		 * as offline notification will cause the notified
  3067		 * cpu to drain that CPU pcps and on_each_cpu_mask
  3068		 * disables preemption as part of its processing
  3069		 */
  3070		for_each_online_cpu(cpu) {
  3071			struct per_cpu_pageset *pcp;
  3072			struct zone *z;
  3073			bool has_pcps = false;
  3074	
  3075			if (force_all_cpus) {
  3076				/*
  3077				 * The pcp.count check is racy, some callers need a
  3078				 * guarantee that no cpu is missed.
  3079				 */
  3080				has_pcps = true;
  3081			} else if (zone) {
  3082				pcp = per_cpu_ptr(zone->pageset, cpu);
  3083				if (pcp->pcp.count)
  3084					has_pcps = true;
  3085			} else {
  3086				for_each_populated_zone(z) {
  3087					pcp = per_cpu_ptr(z->pageset, cpu);
  3088					if (pcp->pcp.count) {
  3089						has_pcps = true;
  3090						break;
  3091					}
  3092				}
  3093			}
  3094	
  3095			if (has_pcps)
  3096				cpumask_set_cpu(cpu, &cpus_with_pcps);
  3097			else
  3098				cpumask_clear_cpu(cpu, &cpus_with_pcps);
  3099		}
  3100	
  3101		for_each_cpu(cpu, &cpus_with_pcps) {
  3102			struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
  3103	
  3104			drain->zone = zone;
  3105			INIT_WORK(&drain->work, drain_local_pages_wq);
  3106			queue_work_on(cpu, mm_percpu_wq, &drain->work);
  3107		}
  3108		for_each_cpu(cpu, &cpus_with_pcps)
  3109			flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
  3110	
  3111		mutex_unlock(&pcpu_drain_mutex);
  3112	}
  3113	
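For context on the force_all_cpus parameter: as the comment above line 3036 explains, the commit in the subject ("mm, page_alloc: disable pcplists during memory offline") wants memory offline to be certain that every CPU has drained, while ordinary callers keep the cheap but racy pcp->count check. A rough sketch of how the two entry points are typically wired up, modelled on the upstream patch (the wrapper names, the mutex, and the high/batch values are assumptions here, not quoted from this tree):

    /* Ordinary callers: the racy non-emptiness check is good enough. */
    void drain_all_pages(struct zone *zone)
    {
    	__drain_all_pages(zone, false);
    }

    /*
     * Memory offline: first make the pcplists unusable (high = 0, batch = 1)
     * so they cannot be refilled, then force every CPU to drain so no page
     * can remain on a pcplist while the range is being offlined.
     */
    void zone_pcp_disable(struct zone *zone)
    {
    	mutex_lock(&pcp_batch_high_lock);
    	__zone_set_pageset_high_and_batch(zone, 0, 1);
    	__drain_all_pages(zone, true);
    }

The point of the sketch is simply that only the offline path passes force_all_cpus = true, which is also why the two helpers end up needing either a declaration in mm/internal.h or the static keyword, as the sparse warnings suggest.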