From: Kefeng Wang <wangkefeng.wang@huawei.com>
mainline inclusion
from mainline-v6.7-rc1
commit a86bc96b77df40c27ead5ef4ac3837904b7eb53f
category: other
bugzilla: https://gitee.com/openeuler/kernel/issues/I8JQWQ
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Saves one compound_head() call, also in preparation for page_cpupid_xchg_last() conversion.
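For readers new to the folio conversion, here is a minimal userspace sketch of where that saving comes from (the structures and model_*() helpers below are simplified stand-ins for illustration only, not the kernel's real definitions): PageAnon() must first resolve a possibly-tail page to its head page via compound_head(), whereas a folio pointer already refers to the head page, so folio_test_anon() skips that lookup.

#include <stdbool.h>
#include <stddef.h>

/*
 * Simplified stand-ins, not the kernel's real struct page/struct folio:
 * tail pages point at their head; the "anon" flag lives on the head page.
 */
struct page {
	struct page *head;	/* non-NULL only for tail pages */
	bool anon;
};

struct folio {
	struct page page;	/* a folio always names a head page */
};

static struct page *model_compound_head(struct page *page)
{
	return page->head ? page->head : page;
}

/* PageAnon()-style check: needs the head lookup on every call */
static bool model_page_anon(struct page *page)
{
	return model_compound_head(page)->anon;
}

/* folio_test_anon()-style check: the head page is already in hand */
static bool model_folio_test_anon(struct folio *folio)
{
	return folio->page.anon;
}

int main(void)
{
	struct folio f = { .page = { .head = NULL, .anon = true } };
	struct page tail = { .head = &f.page, .anon = false };

	/* Both checks agree, but the folio path skips model_compound_head(). */
	return model_page_anon(&tail) == model_folio_test_anon(&f) ? 0 : 1;
}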
Link: https://lkml.kernel.org/r/20231018140806.2783514-18-wangkefeng.wang@huawei.c...
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Conflicts: mm/memory.c
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
---
 mm/memory.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 40e7ba09e429..5b8d6301397a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3016,7 +3016,7 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
  * case, all we need to do here is to mark the page as writable and update
  * any related book-keeping.
  */
-static inline void wp_page_reuse(struct vm_fault *vmf)
+static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
 	__releases(vmf->ptl)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -3024,7 +3024,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
 	pte_t entry;
 
 	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
-	VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));
+	VM_BUG_ON(folio && folio_test_anon(folio) && !PageAnonExclusive(page));
 
 	/*
 	 * Clear the pages cpupid information as the existing
@@ -3225,6 +3225,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
  *			  writeable once the page is prepared
  *
  * @vmf: structure describing the fault
+ * @folio: the folio of vmf->page
  *
  * This function handles all that is needed to finish a write page fault in a
  * shared mapping due to PTE being read-only once the mapped page is prepared.
@@ -3236,7 +3237,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
  * we acquired PTE lock.
  */
-static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
+static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
 {
 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
@@ -3252,7 +3253,7 @@ static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		return VM_FAULT_NOPAGE;
 	}
-	wp_page_reuse(vmf);
+	wp_page_reuse(vmf, folio);
 	return 0;
 }
 
@@ -3277,9 +3278,9 @@ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
 		ret = vma->vm_ops->pfn_mkwrite(vmf);
 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
 			return ret;
-		return finish_mkwrite_fault(vmf);
+		return finish_mkwrite_fault(vmf, NULL);
 	}
-	wp_page_reuse(vmf);
+	wp_page_reuse(vmf, NULL);
 	return 0;
 }
 
@@ -3307,14 +3308,14 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
 			folio_put(folio);
 			return tmp;
 		}
-		tmp = finish_mkwrite_fault(vmf);
+		tmp = finish_mkwrite_fault(vmf, folio);
 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
 			folio_unlock(folio);
 			folio_put(folio);
 			return tmp;
 		}
 	} else {
-		wp_page_reuse(vmf);
+		wp_page_reuse(vmf, folio);
 		folio_lock(folio);
 	}
 	ret |= fault_dirty_shared_page(vmf);
@@ -3438,7 +3439,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
 			return 0;
 		}
-		wp_page_reuse(vmf);
+		wp_page_reuse(vmf, folio);
 		return 0;
 	}
 copy: