From: Kefeng Wang <wangkefeng.wang@huawei.com>
mainline inclusion
from mainline-v6.7-rc1
commit d986ba2b1953f761d3859c22160e82c58ed4287d
category: other
bugzilla: https://gitee.com/openeuler/kernel/issues/I8JQWQ
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Use a folio in change_huge_pmd(), which helps to remove the last caller of xchg_page_access_time().
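
For context, the conversion follows the usual page-to-folio pattern: resolve
the pmd's head page to its folio once with page_folio(), then switch the
helpers to their folio variants. A rough sketch of the mapping (illustrative
only, not a literal excerpt; it assumes folio_xchg_access_time() from earlier
in this series is already available):

	/*
	 * PageAnon(page)                 -> folio_test_anon(folio)
	 * page_to_nid(page)              -> folio_nid(folio)
	 * xchg_page_access_time(page, t) -> folio_xchg_access_time(folio, t)
	 */
	struct folio *folio = page_folio(pmd_page(*pmd));
	bool toptier = node_is_toptier(folio_nid(folio));

	if (folio_test_anon(folio))		/* was PageAnon(page) */
		; /* ... */

	folio_xchg_access_time(folio,		/* was xchg_page_access_time() */
			       jiffies_to_msecs(jiffies));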
Link: https://lkml.kernel.org/r/20231018140806.2783514-11-wangkefeng.wang@huawei.c...
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
---
 mm/huge_memory.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c665330982c4..01acd11f308f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1822,7 +1822,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	if (is_swap_pmd(*pmd)) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
-		struct page *page = pfn_swap_entry_to_page(entry);
+		struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
 		pmd_t newpmd;
 
 		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
@@ -1831,7 +1831,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			 * A protection check is difficult so
 			 * just be safe and disable write
 			 */
-			if (PageAnon(page))
+			if (folio_test_anon(folio))
 				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
 			else
 				entry = make_readable_migration_entry(swp_offset(entry));
@@ -1853,7 +1853,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #endif
 
 	if (prot_numa) {
-		struct page *page;
+		struct folio *folio;
 		bool toptier;
 		/*
 		 * Avoid trapping faults against the zero page. The read-only
@@ -1866,8 +1866,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (pmd_protnone(*pmd))
 			goto unlock;
 
-		page = pmd_page(*pmd);
-		toptier = node_is_toptier(page_to_nid(page));
+		folio = page_folio(pmd_page(*pmd));
+		toptier = node_is_toptier(folio_nid(folio));
 		/*
 		 * Skip scanning top tier node if normal numa
 		 * balancing is disabled
@@ -1878,7 +1878,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
 		    !toptier)
-			xchg_page_access_time(page, jiffies_to_msecs(jiffies));
+			folio_xchg_access_time(folio,
+					       jiffies_to_msecs(jiffies));
 	}
 	/*
 	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
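
With the patch applied, the tail of the prot_numa block reads roughly as
below (reassembled from the hunks above; context the hunks do not show is
elided rather than reproduced):

	if (prot_numa) {
		struct folio *folio;
		bool toptier;
		/* ... zero-page and pmd_protnone checks unchanged ... */

		folio = page_folio(pmd_page(*pmd));
		toptier = node_is_toptier(folio_nid(folio));
		/* ... "skip top tier node" check unchanged ... */

		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
		    !toptier)
			folio_xchg_access_time(folio,
					       jiffies_to_msecs(jiffies));
	}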