mainline inclusion
from mainline-v6.9-rc1
commit a23f517b0e1554467b0eb3bc1ebcb4d626217302
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CHB4
CVE: NA
-------------------------------------------------
Now all callers of mm_counter() have a folio, convert mm_counter() to take a folio. Saves a call to compound_head() hidden inside PageAnon().
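For context, a minimal sketch (not part of this patch; the helper name is
made up) of how a caller that still only has a struct page would use the
new interface, assuming <linux/mm.h> for page_folio(), mm_counter() and
dec_mm_counter():

	/* Hypothetical example: resolve the folio before accounting. */
	static void example_uncharge_page(struct mm_struct *mm, struct page *page)
	{
		/* mm_counter() now takes a folio, so look it up first. */
		struct folio *folio = page_folio(page);

		/* Picks MM_ANONPAGES for anon folios, else the file/shmem counter. */
		dec_mm_counter(mm, mm_counter(folio));
	}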
Link: https://lkml.kernel.org/r/20240111152429.3374566-10-willy@infradead.org
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit a23f517b0e1554467b0eb3bc1ebcb4d626217302)
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Conflicts:
	mm/memory.c
	mm/rmap.c
---
 arch/s390/mm/pgtable.c |  2 +-
 include/linux/mm.h     |  6 +++---
 mm/memory.c            | 10 +++++-----
 mm/rmap.c              |  8 ++++----
 mm/userfaultfd.c       |  2 +-
 5 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 24e7be76f71d..66d4c227c098 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -732,7 +732,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
 	else if (is_migration_entry(entry)) {
 		struct folio *folio = pfn_swap_entry_folio(entry);
 
-		dec_mm_counter(mm, mm_counter(&folio->page));
+		dec_mm_counter(mm, mm_counter(folio));
 	}
 	free_swap_and_cache(entry);
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3452aa356a71..aefa9c1ae3c5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2615,11 +2615,11 @@ static inline int mm_counter_file(struct page *page)
 	return MM_FILEPAGES;
 }
 
-static inline int mm_counter(struct page *page)
+static inline int mm_counter(struct folio *folio)
 {
-	if (PageAnon(page))
+	if (folio_test_anon(folio))
 		return MM_ANONPAGES;
-	return mm_counter_file(page);
+	return mm_counter_file(&folio->page);
 }
 
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
diff --git a/mm/memory.c b/mm/memory.c
index 3bb5b9543771..663b098bdf6e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -813,7 +813,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	} else if (is_migration_entry(entry)) {
 		folio = pfn_swap_entry_folio(entry);
 
-		rss[mm_counter(&folio->page)]++;
+		rss[mm_counter(folio)]++;
 
 		if (!is_readable_migration_entry(entry) &&
 				is_cow_mapping(vm_flags)) {
@@ -845,7 +845,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		 * keep things as they are.
 		 */
 		folio_get(folio);
-		rss[mm_counter(page)]++;
+		rss[mm_counter(folio)]++;
 		/* Cannot fail as these pages cannot get pinned. */
 		folio_try_dup_anon_rmap_pte(folio, page, src_vma);
 
@@ -1477,7 +1477,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			if (pte_young(ptent) && likely(vma_has_recency(vma)))
 				folio_mark_accessed(folio);
 		}
-		rss[mm_counter(page)]--;
+		rss[mm_counter(folio)]--;
 		add_reliable_page_counter(page, mm, -1);
 		if (!delay_rmap) {
 			folio_remove_rmap_pte(folio, page, vma);
@@ -1506,7 +1506,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			 * see zap_install_uffd_wp_if_needed().
 			 */
 			WARN_ON_ONCE(!vma_is_anonymous(vma));
-			rss[mm_counter(page)]--;
+			rss[mm_counter(folio)]--;
 			add_reliable_page_counter(page, mm, -1);
 			if (is_device_private_entry(entry))
 				folio_remove_rmap_pte(folio, page, vma);
@@ -1522,7 +1522,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			folio = pfn_swap_entry_folio(entry);
 			if (!should_zap_folio(details, folio))
 				continue;
-			rss[mm_counter(&folio->page)]--;
+			rss[mm_counter(folio)]--;
 		} else if (pte_marker_entry_uffd_wp(entry)) {
 			/*
 			 * For anon: always drop the marker; for file: only
diff --git a/mm/rmap.c b/mm/rmap.c
index 73f2b3a33158..db6a2e4bafc2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1750,7 +1750,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
 						hsz);
 			} else {
-				dec_mm_counter(mm, mm_counter(&folio->page));
+				dec_mm_counter(mm, mm_counter(folio));
 				add_reliable_page_counter(&folio->page, mm, -1);
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
@@ -1766,7 +1766,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * migration) will not expect userfaults on already
 			 * copied pages.
 			 */
-			dec_mm_counter(mm, mm_counter(&folio->page));
+			dec_mm_counter(mm, mm_counter(folio));
 			add_reliable_page_counter(&folio->page, mm, -1);
 		} else if (folio_test_anon(folio)) {
 			swp_entry_t entry = page_swap_entry(subpage);
@@ -2156,7 +2156,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
 						hsz);
 			} else {
-				dec_mm_counter(mm, mm_counter(&folio->page));
+				dec_mm_counter(mm, mm_counter(folio));
 				add_reliable_page_counter(&folio->page, mm, -1);
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
@@ -2172,7 +2172,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * migration) will not expect userfaults on already
 			 * copied pages.
 			 */
-			dec_mm_counter(mm, mm_counter(&folio->page));
+			dec_mm_counter(mm, mm_counter(folio));
 			add_reliable_page_counter(&folio->page, mm, -1);
 		} else {
 			swp_entry_t entry;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index f8449f506af2..ef7f6348c7ec 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -125,7 +125,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
 	 * Must happen after rmap, as mm_counter() checks mapping (via
 	 * PageAnon()), which is set by __page_set_anon_rmap().
 	 */
-	inc_mm_counter(dst_mm, mm_counter(page));
+	inc_mm_counter(dst_mm, mm_counter(folio));
 
 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);