From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
mainline inclusion
from mainline-v6.10-rc1
commit 5b8d75913a0ed9deb16140c0aa880c4d6db2dc62
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R3AY
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5b8d75913a0ed9deb16140c0aa880c4d6db2dc62
--------------------------------
mm: combine free_the_page() and free_unref_page()

The pcp_allowed_order() check in free_the_page() was only being skipped by
__folio_put_small() which is about to be rearranged.
Link: https://lkml.kernel.org/r/20240405153228.2563754-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Conflicts:
	mm/page_alloc.c
[ Context conflicts with commit 17edeb5d3f76 and cc92eba1c88b. ]
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 mm/page_alloc.c | 25 +++++++++++--------------
 1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 315c3fdaa62b..19ba7cf4e80d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -567,14 +567,6 @@ static inline bool pcp_allowed_order(unsigned int order)
 	return false;
 }
-static inline void free_the_page(struct page *page, unsigned int order)
-{
-	if (pcp_allowed_order(order))		/* Via pcp? */
-		free_unref_page(page, order);
-	else
-		__free_pages_ok(page, order, FPI_NONE);
-}
-
 /*
  * Higher-order pages are called "compound pages".  They are structured thusly:
  *
@@ -610,7 +602,7 @@ void destroy_large_folio(struct folio *folio)
 		folio_undo_large_rmappable(folio);
 	mem_cgroup_uncharge(folio);
-	free_the_page(&folio->page, folio_order(folio));
+	free_unref_page(&folio->page, folio_order(folio));
 }
 static inline void set_buddy_order(struct page *page, unsigned int order)
@@ -2523,6 +2515,11 @@ void free_unref_page(struct page *page, unsigned int order)
 		return;
 	}
+	if (!pcp_allowed_order(order)) {
+		__free_pages_ok(page, order, FPI_NONE);
+		return;
+	}
+
 	if (!free_unref_page_prepare(page, pfn, order))
 		return;
@@ -4909,10 +4906,10 @@ void __free_pages(struct page *page, unsigned int order)
 	int head = PageHead(page);
 	if (put_page_testzero(page))
-		free_the_page(page, order);
+		free_unref_page(page, order);
 	else if (!head)
 		while (order-- > 0)
-			free_the_page(page + (1 << order), order);
+			free_unref_page(page + (1 << order), order);
 }
 EXPORT_SYMBOL(__free_pages);
@@ -4963,7 +4960,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 	if (page_ref_sub_and_test(page, count))
-		free_the_page(page, compound_order(page));
+		free_unref_page(page, compound_order(page));
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
@@ -5004,7 +5001,7 @@ void *page_frag_alloc_align(struct page_frag_cache *nc,
 		goto refill;
 	if (unlikely(nc->pfmemalloc)) {
-		free_the_page(page, compound_order(page));
+		free_unref_page(page, compound_order(page));
 		goto refill;
 	}
@@ -5048,7 +5045,7 @@ void page_frag_free(void *addr)
 	struct page *page = virt_to_head_page(addr);
 	if (unlikely(put_page_testzero(page)))
-		free_the_page(page, compound_order(page));
+		free_unref_page(page, compound_order(page));
 }
 EXPORT_SYMBOL(page_frag_free);