mainline inclusion
from mainline-v6.9-rc1
commit eabafaaa957553142cdafc8ae804fb679e5a5f5e
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CHB4
CVE: NA
-------------------------------------------------
Make should_zap_page() take a folio and rename it to should_zap_folio() as preparation for converting mm counter functions to take a folio. Saves a call to compound_head() hidden inside PageAnon().
[wangkefeng.wang@huawei.com: fix used-uninitialized warning]
Link: https://lkml.kernel.org/r/962a7993-fce9-4de8-85cd-25e290f25736@huawei.com
Link: https://lkml.kernel.org/r/20240111152429.3374566-9-willy@infradead.org
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit eabafaaa957553142cdafc8ae804fb679e5a5f5e)
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/memory.c | 31 +++++++++++++++++--------------
 1 file changed, 17 insertions(+), 14 deletions(-)
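
For context on the compound_head() call the commit message refers to: PageAnon(page) has to resolve the page to its folio before it can look at the mapping, while folio_test_anon(folio) starts from the folio and reads it directly. Below is a minimal, self-contained userspace sketch of that difference; struct page, struct folio and page_folio() here are simplified stand-ins for the kernel's real definitions, not the actual implementation.

/*
 * Simplified userspace model, not the kernel's definitions: why
 * folio_test_anon(folio) is cheaper than PageAnon(page).
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_MAPPING_ANON	0x1UL

struct folio {
	unsigned long mapping;		/* low bit set => anonymous memory */
};

struct page {
	struct folio *head;		/* stand-in for the compound_head() link */
};

/* Models page_folio()/compound_head(): an extra lookup to find the owning folio. */
static struct folio *page_folio(struct page *page)
{
	return page->head;
}

/* Folio variant: tests the flag directly on the folio. */
static bool folio_test_anon(struct folio *folio)
{
	return folio->mapping & PAGE_MAPPING_ANON;
}

/* Page variant: pays for the head lookup on every call. */
static bool PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}

int main(void)
{
	struct folio anon_folio = { .mapping = PAGE_MAPPING_ANON };
	struct page page = { .head = &anon_folio };

	/* Resolve the folio once, then reuse it, as the patch does. */
	struct folio *folio = page_folio(&page);

	printf("PageAnon()=%d folio_test_anon()=%d\n",
	       PageAnon(&page), folio_test_anon(folio));
	return 0;
}

In the patch itself the same idea appears as resolving the folio once with page_folio(page) near the top of zap_pte_range() and passing that folio to should_zap_folio() and folio_test_anon(), instead of re-deriving it from the page at each check.
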
diff --git a/mm/memory.c b/mm/memory.c
index ba94174d2d8a..3bb5b9543771 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1370,19 +1370,20 @@ static inline bool should_zap_cows(struct zap_details *details)
 	return details->even_cows;
 }
 
-/* Decides whether we should zap this page with the page pointer specified */
-static inline bool should_zap_page(struct zap_details *details, struct page *page)
+/* Decides whether we should zap this folio with the folio pointer specified */
+static inline bool should_zap_folio(struct zap_details *details,
+				    struct folio *folio)
 {
-	/* If we can make a decision without *page.. */
+	/* If we can make a decision without *folio.. */
 	if (should_zap_cows(details))
 		return true;
 
-	/* E.g. the caller passes NULL for the case of a zero page */
-	if (!page)
+	/* E.g. the caller passes NULL for the case of a zero folio */
+	if (!folio)
 		return true;
 
-	/* Otherwise we should only zap non-anon pages */
-	return !PageAnon(page);
+	/* Otherwise we should only zap non-anon folios */
+	return !folio_test_anon(folio);
 }
 
 static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
@@ -1435,7 +1436,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = ptep_get(pte);
-		struct folio *folio;
+		struct folio *folio = NULL;
 		struct page *page;
 
 		if (pte_none(ptent))
@@ -1448,7 +1449,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			unsigned int delay_rmap;
 
 			page = vm_normal_page(vma, addr, ptent);
-			if (unlikely(!should_zap_page(details, page)))
+			if (page)
+				folio = page_folio(page);
+
+			if (unlikely(!should_zap_folio(details, folio)))
 				continue;
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
 							tlb->fullmm);
@@ -1461,7 +1465,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				continue;
 			}
 
-			folio = page_folio(page);
 			delay_rmap = 0;
 			if (!folio_test_anon(folio)) {
 				if (pte_dirty(ptent)) {
@@ -1494,7 +1497,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			   is_device_exclusive_entry(entry)) {
 			page = pfn_swap_entry_to_page(entry);
 			folio = page_folio(page);
-			if (unlikely(!should_zap_page(details, page)))
+			if (unlikely(!should_zap_folio(details, folio)))
 				continue;
 			/*
 			 * Both device private/exclusive mappings should only
@@ -1516,10 +1519,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			if (unlikely(!free_swap_and_cache(entry)))
 				print_bad_pte(vma, addr, ptent, NULL);
 		} else if (is_migration_entry(entry)) {
-			page = pfn_swap_entry_to_page(entry);
-			if (!should_zap_page(details, page))
+			folio = pfn_swap_entry_folio(entry);
+			if (!should_zap_folio(details, folio))
 				continue;
-			rss[mm_counter(page)]--;
+			rss[mm_counter(&folio->page)]--;
 		} else if (pte_marker_entry_uffd_wp(entry)) {
 			/*
 			 * For anon: always drop the marker; for file: only