
From: "Matthew Wilcox (Oracle)" <willy@infradead.org> mainline inclusion from mainline-v6.10-rc1 commit 0234779276e56fb17677f3cf64d7cd501f8abe69 category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IAXCD2 Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i... -------------------------------- Work purely in terms of the folio. Removes a call to compound_head() in put_page(). Link: https://lkml.kernel.org/r/20240403171838.1445826-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Conflicts: mm/khugepaged.c [use folio version of add_reliable_[page/folio]_counter] Signed-off-by: Nanyong Sun <sunnanyong@huawei.com> --- mm/khugepaged.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 3df4bf24dbc7..f2855694de53 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1113,7 +1113,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, pte_t *pte; pgtable_t pgtable; struct folio *folio; - struct page *hpage; spinlock_t *pmd_ptl, *pte_ptl; int result = SCAN_FAIL; struct vm_area_struct *vma; @@ -1130,7 +1129,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, mmap_read_unlock(mm); result = alloc_charge_folio(&folio, mm, cc); - hpage = &folio->page; if (result != SCAN_SUCCEED) goto out_nolock; @@ -1226,7 +1224,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, */ anon_vma_unlock_write(vma->anon_vma); - result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd, + result = __collapse_huge_page_copy(pte, &folio->page, pmd, _pmd, vma, address, pte_ptl, &compound_pagelist); pte_unmap(pte); @@ -1241,12 +1239,12 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, __folio_mark_uptodate(folio); pgtable = pmd_pgtable(_pmd); - _pmd = mk_huge_pmd(hpage, vma->vm_page_prot); + _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot); _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); - add_reliable_page_counter(hpage, vma->vm_mm, HPAGE_PMD_NR); + add_reliable_folio_counter(folio, vma->vm_mm, HPAGE_PMD_NR); folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); pgtable_trans_huge_deposit(mm, pmd, pgtable); @@ -1254,14 +1252,14 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, update_mmu_cache_pmd(vma, address, pmd); spin_unlock(pmd_ptl); - hpage = NULL; + folio = NULL; result = SCAN_SUCCEED; out_up_write: mmap_write_unlock(mm); out_nolock: - if (hpage) - put_page(hpage); + if (folio) + folio_put(folio); trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result); return result; } -- 2.33.0