From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
mainline inclusion from mainline-v6.10-rc1 commit 43849758fdc976a6d6108ed6dfccdb136fdeec39 category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IAXCD2
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=43849758fdc976a6d6108ed6dfccdb136fdeec39
--------------------------------
Replace the use of pages with folios. Saves a few calls to compound_head() and removes some uses of obsolete functions.
Link: https://lkml.kernel.org/r/20240403171838.1445826-8-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: David Hildenbrand <david@redhat.com> Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Conflicts: mm/khugepaged.c [adjust page_from_dynamic_pool and page_reliable to use folio] Signed-off-by: Nanyong Sun <sunnanyong@huawei.com> --- include/trace/events/huge_memory.h | 6 ++--- mm/khugepaged.c | 37 +++++++++++++++--------------- 2 files changed, 21 insertions(+), 22 deletions(-)
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index dc6eeef2d3da..ab576898a126 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -174,10 +174,10 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
TRACE_EVENT(mm_khugepaged_scan_file,
- TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file, + TP_PROTO(struct mm_struct *mm, struct folio *folio, struct file *file, int present, int swap, int result),
- TP_ARGS(mm, page, file, present, swap, result), + TP_ARGS(mm, folio, file, present, swap, result),
TP_STRUCT__entry( __field(struct mm_struct *, mm) @@ -190,7 +190,7 @@ TRACE_EVENT(mm_khugepaged_scan_file,
TP_fast_assign( __entry->mm = mm; - __entry->pfn = page ? page_to_pfn(page) : -1; + __entry->pfn = folio ? folio_pfn(folio) : -1; __assign_str(filename, file->f_path.dentry->d_iname); __entry->present = present; __entry->swap = swap; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 3cab0e7f9a64..d585ee8357da 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2241,7 +2241,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, struct file *file, pgoff_t start, struct collapse_control *cc) { - struct page *page = NULL; + struct folio *folio = NULL; struct address_space *mapping = file->f_mapping; XA_STATE(xas, &mapping->i_pages, start); int present, swap; @@ -2254,11 +2254,11 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, nodes_clear(cc->alloc_nmask); cc->reliable = false; rcu_read_lock(); - xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { - if (xas_retry(&xas, page)) + xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) { + if (xas_retry(&xas, folio)) continue;
- if (xa_is_value(page)) { + if (xa_is_value(folio)) { ++swap; if (cc->is_khugepaged && swap > khugepaged_max_ptes_swap) { @@ -2273,11 +2273,9 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, * TODO: khugepaged should compact smaller compound pages * into a PMD sized page */ - if (PageTransCompound(page)) { - struct page *head = compound_head(page); - - result = compound_order(head) == HPAGE_PMD_ORDER && - head->index == start + if (folio_test_large(folio)) { + result = folio_order(folio) == HPAGE_PMD_ORDER && + folio->index == start /* Maybe PMD-mapped */ ? SCAN_PTE_MAPPED_HUGEPAGE : SCAN_PAGE_COMPOUND; @@ -2290,33 +2288,34 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, break; }
- node = page_to_nid(page); + node = folio_nid(folio); if (hpage_collapse_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; break; } cc->node_load[node]++;
- if (!PageLRU(page)) { + if (!folio_test_lru(folio)) { result = SCAN_PAGE_LRU; break; }
- if (page_count(page) != - 1 + page_mapcount(page) + page_has_private(page)) { + if (folio_ref_count(folio) != + 1 + folio_mapcount(folio) + folio_test_private(folio)) { result = SCAN_PAGE_COUNT; break; }
- if (page_from_dynamic_pool(page)) { + if (page_from_dynamic_pool(&folio->page)) { result = SCAN_FAIL; break; }
/* - * We probably should check if the page is referenced here, but - * nobody would transfer pte_young() to PageReferenced() for us. - * And rmap walk here is just too costly... + * We probably should check if the folio is referenced + * here, but nobody would transfer pte_young() to + * folio_test_referenced() for us. And rmap walk here + * is just too costly... */
present++; @@ -2326,7 +2325,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, cond_resched_rcu(); }
- if (page_reliable(page)) + if (folio_reliable(folio)) cc->reliable = true; } rcu_read_unlock(); @@ -2341,7 +2340,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, } }
- trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result); + trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result); return result; } #else