From: Ma Wupeng mawupeng1@huawei.com
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S CVE: NA
--------------------------------
For a reliable memory allocation bound to nodes which do not have any reliable zones, the allocation will fail and a warning message will be produced at the end of __alloc_pages_slowpath().
Though this memory allocation can fall back to the movable zone in check_after_alloc() if fallback is enabled, something should be done to prevent this pointless warning.
To solve this problem, fall back to the movable zone if no suitable zone is found.
Signed-off-by: Ma Wupeng mawupeng1@huawei.com Reviewed-by: Kefeng Wang wangkefeng.wang@huawei.com Signed-off-by: Yongqiang Liu liuyongqiang13@huawei.com --- mm/page_alloc.c | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1e59761354e1..44d286286dbb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4272,6 +4272,25 @@ check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) }
#ifdef CONFIG_MEMORY_RELIABLE +/* + * if fallback is enabled, fallback to movable zone if no dma/normal zone + * found + */ +static inline struct zone *mem_reliable_fallback_zone(gfp_t gfp_mask, + struct alloc_context *ac) +{ + if (!reliable_allow_fb_enabled()) + return NULL; + + if (!(gfp_mask & ___GFP_RELIABILITY)) + return NULL; + + ac->high_zoneidx = gfp_zone(gfp_mask & ~___GFP_RELIABILITY); + ac->preferred_zoneref = first_zones_zonelist( + ac->zonelist, ac->high_zoneidx, ac->nodemask); + return ac->preferred_zoneref->zone; +} + static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask, struct alloc_context *ac) { @@ -4290,6 +4309,11 @@ static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask, } } #else +static inline struct zone *mem_reliable_fallback_zone(gfp_t gfp_mask, + struct alloc_context *ac) +{ + return NULL; +} static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask, struct alloc_context *ac) {} #endif @@ -4339,8 +4363,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, */ ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, ac->high_zoneidx, ac->nodemask); - if (!ac->preferred_zoneref->zone) - goto nopage; + if (!ac->preferred_zoneref->zone) { + if (!mem_reliable_fallback_zone(gfp_mask, ac)) + goto nopage; + }
if (gfp_mask & __GFP_KSWAPD_RECLAIM) wake_all_kswapds(order, gfp_mask, ac);