From: Kefeng Wang <wangkefeng.wang@huawei.com>
mainline inclusion
from mainline-v6.11-rc1
commit 593a10dabe08dcf93259fce2badd8dc2528859a8
category: performance
bugzilla: https://gitee.com/openeuler/kernel/issues/IAHY3K
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Folios of order <= 1 are never on the deferred split list. The order check inside folio_undo_large_rmappable() was added by commit 8897277acfef ("mm: support order-1 folios in the page cache"), but callers still test for small (order-0) folios before each call, so the check is done twice. Drop the callers' checks and keep only the folio_order() check inside the function.
In addition, move all the checks into the header file to save a function call for folios that are not large-rmappable or whose deferred_list is empty.
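To illustrate the intended effect at the call sites (taken from the mm/swap.c and mm/vmscan.c hunks below), a guard that used to be open-coded as roughly

	if (folio_test_large(folio) && folio_test_large_rmappable(folio))
		folio_undo_large_rmappable(folio);

collapses to a plain

	folio_undo_large_rmappable(folio);

with the order, large_rmappable and deferred_list checks now performed by the inline wrapper in mm/internal.h, which only calls out of line into __folio_undo_large_rmappable() when there is real work to do.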
Link: https://lkml.kernel.org/r/20240521130315.46072-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Conflicts:
	mm/page_alloc.c
[ Context conflicts. ]
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 mm/huge_memory.c | 13 +------------
 mm/internal.h    | 17 ++++++++++++++++-
 mm/page_alloc.c  |  3 +--
 mm/swap.c        |  8 ++------
 mm/vmscan.c      |  8 ++------
 5 files changed, 22 insertions(+), 27 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6178b28ddbcb..1f659287fbb3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3355,22 +3355,11 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	return ret;
 }
 
-void folio_undo_large_rmappable(struct folio *folio)
+void __folio_undo_large_rmappable(struct folio *folio)
 {
 	struct deferred_split *ds_queue;
 	unsigned long flags;
 
-	if (folio_order(folio) <= 1)
-		return;
-
-	/*
-	 * At this point, there is no one trying to add the folio to
-	 * deferred_list. If folio is not in deferred_list, it's safe
-	 * to check without acquiring the split_queue_lock.
-	 */
-	if (data_race(list_empty(&folio->_deferred_list)))
-		return;
-
 	ds_queue = get_deferred_split_queue(folio);
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (!list_empty(&folio->_deferred_list)) {
diff --git a/mm/internal.h b/mm/internal.h
index 0ecbaa392054..7db2957ef3a0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -593,7 +593,22 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
-void folio_undo_large_rmappable(struct folio *folio);
+void __folio_undo_large_rmappable(struct folio *folio);
+static inline void folio_undo_large_rmappable(struct folio *folio)
+{
+	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+		return;
+
+	/*
+	 * At this point, there is no one trying to add the folio to
+	 * deferred_list. If folio is not in deferred_list, it's safe
+	 * to check without acquiring the split_queue_lock.
+	 */
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return;
+
+	__folio_undo_large_rmappable(folio);
+}
 
 static inline struct folio *page_rmappable_folio(struct page *page)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c4bec13e2702..52382ba24232 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2568,8 +2568,7 @@ void free_unref_folios(struct folio_batch *folios)
 			continue;
 		}
 
-		if (order > 0 && folio_test_large_rmappable(folio))
-			folio_undo_large_rmappable(folio);
+		folio_undo_large_rmappable(folio);
 		if (!free_unref_page_prepare(&folio->page, pfn, order))
 			continue;
diff --git a/mm/swap.c b/mm/swap.c
index 1c9e8f70d6b5..358bf8494062 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -123,8 +123,7 @@ void __folio_put(struct folio *folio)
 	}
 
 	page_cache_release(folio);
-	if (folio_test_large(folio) && folio_test_large_rmappable(folio))
-		folio_undo_large_rmappable(folio);
+	folio_undo_large_rmappable(folio);
 	mem_cgroup_uncharge(folio);
 	free_unref_page(&folio->page, folio_order(folio));
 }
@@ -999,10 +998,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			free_huge_folio(folio);
 			continue;
 		}
-		if (folio_test_large(folio) &&
-		    folio_test_large_rmappable(folio))
-			folio_undo_large_rmappable(folio);
-
+		folio_undo_large_rmappable(folio);
 		__page_cache_release(folio, &lruvec, &flags);
 
 		if (j != i)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 37019d51c31d..3337907ae5e9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2135,9 +2135,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		 */
 		nr_reclaimed += nr_pages;
 
-		if (folio_test_large(folio) &&
-		    folio_test_large_rmappable(folio))
-			folio_undo_large_rmappable(folio);
+		folio_undo_large_rmappable(folio);
 		if (folio_batch_add(&free_folios, folio) == 0) {
 			mem_cgroup_uncharge_folios(&free_folios);
 			try_to_unmap_flush();
@@ -2545,9 +2543,7 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 		if (unlikely(folio_put_testzero(folio))) {
 			__folio_clear_lru_flags(folio);
 
-			if (folio_test_large(folio) &&
-			    folio_test_large_rmappable(folio))
-				folio_undo_large_rmappable(folio);
+			folio_undo_large_rmappable(folio);
 			if (folio_batch_add(&free_folios, folio) == 0) {
 				spin_unlock_irq(&lruvec->lru_lock);
 				mem_cgroup_uncharge_folios(&free_folios);