
From: "Matthew Wilcox (Oracle)" <willy@infradead.org> mainline inclusion from mainline-v6.8-rc1 commit 6e03492e9d288d9ce886064289e2768da5d7d967 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IC3A3N Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i... -------------------------------- The only two callers simply call put_page() on the page returned, so they're happier calling folio_put(). Saves two calls to compound_head(). Link: https://lkml.kernel.org/r/20231213215842.671461-13-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Conflicts: mm/swap.h mm/swap_state.c [Context conflict: 1. patchset "mempolicy: cleanups leading to NUMA mpol without vma" is not merged. 2. patchset "workload-specific and memory pressure-driven zswap writeback" is not merged.] Signed-off-by: Tong Tiangen <tongtiangen@huawei.com> --- mm/madvise.c | 22 +++++++++++----------- mm/swap.h | 8 +++----- mm/swap_state.c | 16 ++++++++++------ 3 files changed, 24 insertions(+), 22 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index 4fd5eb465bad..0ad321350cfe 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -203,7 +203,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, for (addr = start; addr < end; addr += PAGE_SIZE) { pte_t pte; swp_entry_t entry; - struct page *page; + struct folio *folio; if (!ptep++) { ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); @@ -221,10 +221,10 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, pte_unmap_unlock(ptep, ptl); ptep = NULL; - page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, + folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, vma, addr, &splug); - if (page) - put_page(page); + if (folio) + folio_put(folio); } if (ptep) @@ -246,17 +246,17 @@ static void shmem_swapin_range(struct vm_area_struct *vma, { XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); pgoff_t end_index = linear_page_index(vma, end) - 1; - struct page *page; + struct folio *folio; struct swap_iocb *splug = NULL; rcu_read_lock(); - xas_for_each(&xas, page, end_index) { + xas_for_each(&xas, folio, end_index) { unsigned long addr; swp_entry_t entry; - if (!xa_is_value(page)) + if (!xa_is_value(folio)) continue; - entry = radix_to_swp_entry(page); + entry = radix_to_swp_entry(folio); /* There might be swapin error entries in shmem mapping. 
 mm/madvise.c    | 22 +++++++++++-----------
 mm/swap.h       |  8 +++-----
 mm/swap_state.c | 16 ++++++++++------
 3 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index 4fd5eb465bad..0ad321350cfe 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -203,7 +203,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
 		pte_t pte;
 		swp_entry_t entry;
-		struct page *page;
+		struct folio *folio;
 
 		if (!ptep++) {
 			ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -221,10 +221,10 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 		pte_unmap_unlock(ptep, ptl);
 		ptep = NULL;
 
-		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
+		folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
 					     vma, addr, &splug);
-		if (page)
-			put_page(page);
+		if (folio)
+			folio_put(folio);
 	}
 
 	if (ptep)
@@ -246,17 +246,17 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
 {
 	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
 	pgoff_t end_index = linear_page_index(vma, end) - 1;
-	struct page *page;
+	struct folio *folio;
 	struct swap_iocb *splug = NULL;
 
 	rcu_read_lock();
-	xas_for_each(&xas, page, end_index) {
+	xas_for_each(&xas, folio, end_index) {
 		unsigned long addr;
 		swp_entry_t entry;
 
-		if (!xa_is_value(page))
+		if (!xa_is_value(folio))
 			continue;
-		entry = radix_to_swp_entry(page);
+		entry = radix_to_swp_entry(folio);
 		/* There might be swapin error entries in shmem mapping. */
 		if (non_swap_entry(entry))
 			continue;
@@ -266,10 +266,10 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
 		xas_pause(&xas);
 		rcu_read_unlock();
 
-		page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
+		folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
 					     vma, addr, &splug);
-		if (page)
-			put_page(page);
+		if (folio)
+			folio_put(folio);
 
 		rcu_read_lock();
 	}
diff --git a/mm/swap.h b/mm/swap.h
index b6c6a3b46ff3..cf01b7cc2c60 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -44,11 +44,9 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
 		struct vm_area_struct *vma, unsigned long addr);
 struct folio *filemap_get_incore_folio(struct address_space *mapping,
 		pgoff_t index);
-
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-				   struct vm_area_struct *vma,
-				   unsigned long addr,
-				   struct swap_iocb **plug);
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+		struct vm_area_struct *vma, unsigned long addr,
+		struct swap_iocb **plug);
 struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		struct vm_area_struct *vma, unsigned long addr,
 		bool *new_page_allocated);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d0fea9932d4b..ce8147cc4590 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -537,9 +537,9 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  * __read_swap_cache_async() call them and swap_read_folio() holds the
  * swap cache folio lock.
  */
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-				   struct vm_area_struct *vma,
-				   unsigned long addr, struct swap_iocb **plug)
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+		struct vm_area_struct *vma, unsigned long addr,
+		struct swap_iocb **plug)
 {
 	bool page_was_allocated;
 	struct folio *folio = __read_swap_cache_async(entry, gfp_mask,
@@ -548,7 +548,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	if (page_was_allocated)
 		swap_read_folio(folio, false, plug);
 
-	return folio_file_page(folio, swp_offset(entry));
+	return folio;
 }
 
 static unsigned int __swapin_nr_pages(unsigned long prev_offset,
@@ -680,7 +680,9 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 skip:
 	/* The page was likely read above, so no need for plugging here */
-	return read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
+	folio = read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
+
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -847,8 +849,10 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 	lru_add_drain();
 skip:
 	/* The page was likely read above, so no need for plugging here */
-	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
+	folio = read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
 				     NULL);
+
+	return folio_file_page(folio, swp_offset(fentry));
 }
 
 /**
-- 
2.25.1