From: Kefeng Wang <wangkefeng.wang@huawei.com>
maillist inclusion
category: performance
bugzilla: https://gitee.com/openeuler/kernel/issues/I9GBVL
CVE: NA
Reference: https://lore.kernel.org/all/20240412064751.119015-2-wangkefeng.wang@huawei.c...
--------------------------------
In order to support batch mm counter updating in filemap_map_pages(), move the mm counter update out of set_pte_range() and into its callers. The folios mapped by filemap_map_pages() always come from the page cache, so those callers update the file counter directly; the other caller, finish_fault(), distinguishes anonymous (COW) folios from file folios via vmf->flags and vma->vm_flags.
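With the update lifted out, the counter-type selection in finish_fault() reduces to a single check; simplified from the mm/memory.c hunks below (locking and error paths omitted):

	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
		      !(vma->vm_flags & VM_SHARED);
	int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);

	set_pte_range(vmf, folio, page, 1, vmf->address);
	add_mm_counter(vma->vm_mm, type, 1);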
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
---
 mm/filemap.c | 4 ++++
 mm/memory.c  | 8 +++++---
 2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 94c9f36b17d8..57ed2e109707 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3500,6 +3500,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 skip:
 		if (count) {
 			set_pte_range(vmf, folio, page, count, addr);
+			add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio),
+				       count);
 			folio_ref_add(folio, count);
 			if (in_range(vmf->address, addr, count * PAGE_SIZE))
 				ret = VM_FAULT_NOPAGE;
@@ -3514,6 +3516,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 
 	if (count) {
 		set_pte_range(vmf, folio, page, count, addr);
+		add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), count);
 		folio_ref_add(folio, count);
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
@@ -3548,6 +3551,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 		ret = VM_FAULT_NOPAGE;
 
 	set_pte_range(vmf, folio, page, 1, addr);
+	add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), 1);
 	folio_ref_inc(folio);
 
 	return ret;
diff --git a/mm/memory.c b/mm/memory.c
index 83f030d6818d..2c317f517d1d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4660,12 +4660,10 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 	/* copy-on-write page */
 	add_reliable_folio_counter(folio, vma->vm_mm, nr);
 	if (write && !(vma->vm_flags & VM_SHARED)) {
-		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
 		VM_BUG_ON_FOLIO(nr != 1, folio);
 		folio_add_new_anon_rmap(folio, vma, addr);
 		folio_add_lru_vma(folio, vma);
 	} else {
-		add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
 		folio_add_file_rmap_ptes(folio, page, nr, vma);
 	}
 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
@@ -4702,9 +4700,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page;
 	vm_fault_t ret;
+	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
+		      !(vma->vm_flags & VM_SHARED);
 
 	/* Did we COW the page? */
-	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
+	if (is_cow)
 		page = vmf->cow_page;
 	else
 		page = vmf->page;
@@ -4740,8 +4740,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	/* Re-check under ptl */
 	if (likely(!vmf_pte_changed(vmf))) {
 		struct folio *folio = page_folio(page);
+		int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
 
 		set_pte_range(vmf, folio, page, 1, vmf->address);
+		add_mm_counter(vma->vm_mm, type, 1);
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
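For context, the batch updating this prepares for in filemap_map_pages() could look roughly like the sketch below: the mapping helpers accumulate their mapped-page counts into a local total instead of calling add_mm_counter() once per folio, and a single counter update is issued after the fault-around loop. This is illustrative only; the accumulator name (rss) and the extra helper parameter are assumptions about the follow-up patch, not part of this one:

	/* in filemap_map_pages(): hypothetical batched counter update */
	unsigned long rss = 0;
	int folio_type = mm_counter_file(folio);

	do {
		/* assumed plumbing: the helper adds the number of pages
		 * it mapped to rss instead of updating the counter */
		ret |= filemap_map_folio_range(vmf, folio, start, addr,
					       nr_pages, &rss, &mmap_miss);
		folio_unlock(folio);
		folio_put(folio);
	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);

	/* one counter update for the whole fault-around window */
	add_mm_counter(vma->vm_mm, folio_type, rss);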