hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ID4GC1

--------------------------------

During testing, we observed that memory allocation with node_reclaim_mode
enabled becomes extremely slow when a large allocation is attempted on a
node whose free memory is mostly occupied by clean page cache.

The slowness arises because during node reclaim, only direct reclaim-like
behavior is triggered - recycling only 32 pages at a time - without waking
kswapd, even when the watermark levels and alloc_flags already satisfy the
condition to activate kswapd.

This patch wakes kswapd during node reclaim, allowing background reclaim
to bring free memory up to the high watermark and avoid excessive node
reclaim overhead.

Signed-off-by: Wupeng Ma <mawupeng1@huawei.com>
---
 mm/internal.h   | 5 +++--
 mm/page_alloc.c | 3 ++-
 mm/vmscan.c     | 6 +++++-
 3 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 71e6f523175d..d9963389cbff 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1077,11 +1077,12 @@ static inline void mminit_verify_zonelist(void)
 #define NODE_RECLAIM_SUCCESS	1
 
 #ifdef CONFIG_NUMA
-extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
+int node_reclaim(struct pglist_data *pgdat, gfp_t mask, unsigned int order,
+		 int alloc_flags, struct zone *zone);
 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
 #else
 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
-				unsigned int order)
+		unsigned int order, int alloc_flags, struct zone *zone)
 {
 	return NODE_RECLAIM_NOSCAN;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ce0203f660e8..77aef44655ad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3501,7 +3501,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
 				continue;
 
-			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
+			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order,
+					   alloc_flags, zone);
 			switch (ret) {
 			case NODE_RECLAIM_NOSCAN:
 				/* did not scan */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cecc9a173aa..5231320c5559 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -7512,7 +7512,8 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 	return sc.nr_reclaimed >= nr_pages;
 }
 
-int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
+int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order,
+		 int alloc_flags, struct zone *zone)
 {
 	int ret;
 
@@ -7549,6 +7550,9 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
 		return NODE_RECLAIM_NOSCAN;
 
+	if (alloc_flags & ALLOC_KSWAPD)
+		wakeup_kswapd(zone, gfp_mask, order, gfp_zone(gfp_mask));
+
 	ret = __node_reclaim(pgdat, gfp_mask, order);
 
 	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
-- 
2.43.0