From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8USBA
--------------------------------
Memory folios are enabled in the current branch, but the per-task reliable memory accounting still needs some polish to keep the counting correct: call sites that now operate on folios should update the counter through add_reliable_folio_counter() instead of add_reliable_page_counter().
In zap_present_folio_ptes(), the folio must be counted regardless of whether it is anonymous, so move the counter update out of the !folio_test_anon() branch; the migration-entry path in zap_pte_range() also gains the missing decrement.
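For reference, the counter helper is expected to behave roughly as sketched below. This is only an illustrative sketch, not the real mem_reliable implementation: the helper name and the (folio, mm, val) arguments come from the diff, while the body, folio_reliable() and the reliable_nr_page field are assumed names.

	/* Illustrative sketch only -- not the actual openEuler helper. */
	static inline void add_reliable_folio_counter(struct folio *folio,
						      struct mm_struct *mm,
						      int val)
	{
		/* Assumption: only folios in reliable (mirrored) memory count. */
		if (!folio_reliable(folio))
			return;
		/* Assumption: per-mm counter named reliable_nr_page. */
		atomic_long_add(val, &mm->reliable_nr_page);
	}

Passing the folio together with a caller-supplied val (e.g. -nr or HPAGE_PMD_NR) charges the whole mapped range in one call, which is why the call sites below switch from the page variant to the folio variant.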
Fixes: b5be205e006a ("mm/memory: factor out zapping of present pte into zap_present_pte()")
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 kernel/events/uprobes.c | 2 +-
 mm/huge_memory.c        | 4 ++--
 mm/khugepaged.c         | 2 +-
 mm/ksm.c                | 2 +-
 mm/memory.c             | 7 ++++---
 mm/rmap.c               | 8 ++++----
 6 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 9f8d9baa7a2f..a80072c3f888 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -199,7 +199,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 		set_pte_at_notify(mm, addr, pvmw.pte,
 				  mk_pte(new_page, vma->vm_page_prot));
 
-	add_reliable_page_counter(old_page, mm, -1);
+	add_reliable_folio_counter(old_folio, mm, -1);
 	folio_remove_rmap_pte(old_folio, old_page, vma);
 	if (!folio_mapped(old_folio))
 		folio_free_swap(old_folio);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 32ddf09db52b..d743502c70f0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1608,7 +1608,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		return -EAGAIN;
 	}
 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
-	add_reliable_page_counter(src_page, dst_mm, HPAGE_PMD_NR);
+	add_reliable_folio_counter(src_folio, dst_mm, HPAGE_PMD_NR);
 out_zero_page:
 	mm_inc_nr_ptes(dst_mm);
 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
@@ -2577,7 +2577,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 				folio_mark_dirty(folio);
 			if (!folio_test_referenced(folio) && pmd_young(old_pmd))
 				folio_set_referenced(folio);
-			add_reliable_page_counter(page, mm, -HPAGE_PMD_NR);
+			add_reliable_folio_counter(folio, mm, -HPAGE_PMD_NR);
 			folio_remove_rmap_pmd(folio, page, vma);
 			folio_put(folio);
 		}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index fa787464662f..06f31ad4452e 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1644,7 +1644,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 		 */
 		ptep_clear(mm, addr, pte);
 		folio_remove_rmap_pte(folio, page, vma);
-		add_reliable_page_counter(page, mm, -1);
+		add_reliable_folio_counter(folio, mm, -1);
 		nr_ptes++;
 	}
 
diff --git a/mm/ksm.c b/mm/ksm.c
index 94207293e6fc..09411079262b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1236,7 +1236,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	 */
 	if (!is_zero_pfn(page_to_pfn(kpage))) {
 		folio_get(kfolio);
-		add_reliable_page_counter(kpage, mm, 1);
+		add_reliable_folio_counter(kfolio, mm, 1);
 		folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
 		newpte = mk_pte(kpage, vma->vm_page_prot);
 	} else {
diff --git a/mm/memory.c b/mm/memory.c
index fe10342687d0..49a5618661d8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1495,12 +1495,12 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
 		if (pte_young(ptent) && likely(vma_has_recency(vma)))
 			folio_mark_accessed(folio);
 		rss[mm_counter(folio)] -= nr;
-		add_reliable_page_counter(page, mm, -nr);
 	} else {
 		/* We don't need up-to-date accessed/dirty bits. */
 		clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
 		rss[MM_ANONPAGES] -= nr;
 	}
+	add_reliable_folio_counter(folio, mm, -nr);
 
 	/* Checking a single PTE in a batch is sufficient. */
 	arch_check_zapped_pte(vma, ptent);
@@ -1637,7 +1637,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			 */
 			WARN_ON_ONCE(!vma_is_anonymous(vma));
 			rss[mm_counter(folio)]--;
-			add_reliable_page_counter(page, mm, -1);
+			add_reliable_folio_counter(folio, mm, -1);
 			if (is_device_private_entry(entry))
 				folio_remove_rmap_pte(folio, page, vma);
 			folio_put(folio);
@@ -1654,6 +1654,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			if (!should_zap_folio(details, folio))
 				continue;
 			rss[mm_counter(folio)]--;
+			add_reliable_folio_counter(folio, mm, -1);
 		} else if (pte_marker_entry_uffd_wp(entry)) {
 			/*
 			 * For anon: always drop the marker; for file: only
@@ -4643,7 +4644,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
 	add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
-	add_reliable_page_counter(page, vma->vm_mm, HPAGE_PMD_NR);
+	add_reliable_folio_counter(folio, vma->vm_mm, HPAGE_PMD_NR);
 	folio_add_file_rmap_pmd(folio, page, vma);
 
 	/*
diff --git a/mm/rmap.c b/mm/rmap.c
index d13003244e6a..d78bb701bda1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1757,7 +1757,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 						hsz);
 			} else {
 				dec_mm_counter(mm, mm_counter(folio));
-				add_reliable_page_counter(&folio->page, mm, -1);
+				add_reliable_folio_counter(folio, mm, -1);
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
 
@@ -1773,7 +1773,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * copied pages.
 			 */
 			dec_mm_counter(mm, mm_counter(folio));
-			add_reliable_page_counter(&folio->page, mm, -1);
+			add_reliable_folio_counter(folio, mm, -1);
 		} else if (folio_test_anon(folio)) {
 			swp_entry_t entry = page_swap_entry(subpage);
 			pte_t swp_pte;
@@ -2163,7 +2163,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 						hsz);
 			} else {
 				dec_mm_counter(mm, mm_counter(folio));
-				add_reliable_page_counter(&folio->page, mm, -1);
+				add_reliable_folio_counter(folio, mm, -1);
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
 
@@ -2179,7 +2179,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * copied pages.
 			 */
 			dec_mm_counter(mm, mm_counter(folio));
-			add_reliable_page_counter(&folio->page, mm, -1);
+			add_reliable_folio_counter(folio, mm, -1);
 		} else {
 			swp_entry_t entry;
 			pte_t swp_pte;