From: "Matthew Wilcox (Oracle)" willy@infradead.org
mainline inclusion
from mainline-v6.9-rc1
commit 31b2ff82aefb33ce92496a1becddd6ce51060db2
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R3AY
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=31b2ff82aefb33ce92496a1becddd6ce51060db2
--------------------------------
Call folio_undo_large_rmappable() if needed. free_unref_page_prepare() destroys the ability to call folio_order(), so stash the order in folio->private for the benefit of the second loop.
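
To illustrate the stash-and-restore pattern this relies on, here is a
minimal standalone sketch; it is not kernel code, and the struct and
function names are invented for the example:

	struct fake_folio {
		void *private;
		unsigned int order;	/* stands in for the state folio_order() reads */
	};

	static void first_pass(struct fake_folio *f)
	{
		unsigned int order = f->order;	/* the order is still readable here */

		f->order = 0;	/* the destructive "prepare" step clobbers it */

		/* Stash the order for the second pass, as the patch does
		 * via folio->private. */
		f->private = (void *)(unsigned long)order;
	}

	static unsigned int second_pass(struct fake_folio *f)
	{
		unsigned int order = (unsigned long)f->private;

		f->private = NULL;	/* clear the stash once consumed */
		return order;
	}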
Link: https://lkml.kernel.org/r/20240227174254.710559-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Conflicts:
	mm/page_alloc.c
[ Conflicts with dynamic pool, fix the code in free_unref_folios(). ]
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 mm/page_alloc.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e0a11a0b62a..b09e69f14925 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2555,7 +2555,7 @@ void free_unref_page(struct page *page, unsigned int order)
 }
 
 /*
- * Free a batch of 0-order pages
+ * Free a batch of folios
  */
 void free_unref_folios(struct folio_batch *folios)
 {
@@ -2568,25 +2568,30 @@ void free_unref_folios(struct folio_batch *folios)
 	for (i = 0, j = 0; i < folios->nr; i++) {
 		struct folio *folio = folios->folios[i];
 		unsigned long pfn = folio_pfn(folio);
+		unsigned int order = folio_order(folio);
 
 		if (page_from_dynamic_pool(&folio->page)) {
 			dynamic_pool_free_page(&folio->page);
 			continue;
 		}
 
-		if (!free_unref_page_prepare(&folio->page, pfn, 0))
+		if (order > 0 && folio_test_large_rmappable(folio))
+			folio_undo_large_rmappable(folio);
+		if (!free_unref_page_prepare(&folio->page, pfn, order))
 			continue;
 
 		/*
-		 * Free isolated folios directly to the allocator, see
-		 * comment in free_unref_page.
+		 * Free isolated folios and orders not handled on the PCP
+		 * directly to the allocator, see comment in free_unref_page.
 		 */
 		migratetype = get_pcppage_migratetype(&folio->page);
-		if (unlikely(is_migrate_isolate(migratetype))) {
+		if (!pcp_allowed_order(order) ||
+		    is_migrate_isolate(migratetype)) {
 			free_one_page(folio_zone(folio), &folio->page, pfn,
-					0, migratetype, FPI_NONE);
+					order, migratetype, FPI_NONE);
 			continue;
 		}
+		folio->private = (void *)(unsigned long)order;
 		if (j != i)
 			folios->folios[j] = folio;
 		j++;
@@ -2596,7 +2601,9 @@ void free_unref_folios(struct folio_batch *folios)
 	for (i = 0; i < folios->nr; i++) {
 		struct folio *folio = folios->folios[i];
 		struct zone *zone = folio_zone(folio);
+		unsigned int order = (unsigned long)folio->private;
 
+		folio->private = NULL;
 		migratetype = get_pcppage_migratetype(&folio->page);
 
 		/* Different zone requires a different pcp lock */
@@ -2615,7 +2622,7 @@ void free_unref_folios(struct folio_batch *folios)
 			if (unlikely(!pcp)) {
 				pcp_trylock_finish(UP_flags);
 				free_one_page(zone, &folio->page,
-						folio_pfn(folio), 0,
+						folio_pfn(folio), order,
 						migratetype, FPI_NONE);
 				locked_zone = NULL;
 				continue;
@@ -2631,7 +2638,8 @@ void free_unref_folios(struct folio_batch *folios)
 			migratetype = MIGRATE_MOVABLE;
 
 		trace_mm_page_free_batched(&folio->page);
-		free_unref_page_commit(zone, pcp, &folio->page, migratetype, 0);
+		free_unref_page_commit(zone, pcp, &folio->page, migratetype,
+				       order);
 	}
 
 	if (pcp) {
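
For reference, a caller-side sketch of how folios might be batched into
free_unref_folios(). This is assumed usage, not part of the patch:
free_folio_list() is a hypothetical helper, the folio_batch API comes
from <linux/pagevec.h>, and it assumes free_unref_folios() leaves the
batch reinitialised on return, as the mainline implementation does:

	static void free_folio_list(struct folio **folios, unsigned int nr)
	{
		struct folio_batch fbatch;
		unsigned int i;

		folio_batch_init(&fbatch);
		for (i = 0; i < nr; i++) {
			/* folio_batch_add() returns the space remaining;
			 * flush the batch once it is full. */
			if (!folio_batch_add(&fbatch, folios[i]))
				free_unref_folios(&fbatch);
		}
		if (folio_batch_count(&fbatch))
			free_unref_folios(&fbatch);
	}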