
From: "Matthew Wilcox (Oracle)" <willy@infradead.org> mainline inclusion from mainline-v6.8-rc1 commit a4575c4138db887bd27dc7f87cf7cfb0224c6f5e category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IC3A3N Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i... -------------------------------- shmem_swapin_cluster() immediately converts the page back to a folio, and swapin_readahead() may as well call folio_file_page() once instead of having each function call it. [willy@infradead.org: avoid NULL pointer deref] Link: https://lkml.kernel.org/r/ZYI7OcVlM1voKfBl@casper.infradead.org Link: https://lkml.kernel.org/r/20231213215842.671461-14-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Conflicts: mm/shmem.c mm/swap.h mm/swap_state.c [Context conflict: patchset "mempolicy: cleanups leading to NUMA mpol without vma" is not merged.] Signed-off-by: Tong Tiangen <tongtiangen@huawei.com> --- mm/shmem.c | 8 +++----- mm/swap.h | 4 ++-- mm/swap_state.c | 24 +++++++++++++----------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 3f5ba0827ecf..6fdc7144ca91 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1719,18 +1719,16 @@ static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; - struct page *page; + struct folio *folio; struct vm_fault vmf = { .vma = &pvma, }; shmem_pseudo_vma_init(&pvma, info, index); - page = swap_cluster_readahead(swap, gfp, &vmf); + folio = swap_cluster_readahead(swap, gfp, &vmf); shmem_pseudo_vma_destroy(&pvma); - if (!page) - return NULL; - return page_folio(page); + return folio; } /* diff --git a/mm/swap.h b/mm/swap.h index cf01b7cc2c60..af4dc6921b90 100644 --- a/mm/swap.h +++ b/mm/swap.h @@ -51,7 +51,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr, bool *new_page_allocated); -struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag, +struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag, struct vm_fault *vmf); struct page *swapin_readahead(swp_entry_t entry, gfp_t flag, struct vm_fault *vmf); @@ -79,7 +79,7 @@ static inline void show_swap_cache_info(void) { } -static inline struct page *swap_cluster_readahead(swp_entry_t entry, +static inline struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_fault *vmf) { return NULL; diff --git a/mm/swap_state.c b/mm/swap_state.c index ce8147cc4590..a6136403a812 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -618,7 +618,7 @@ static unsigned long swapin_nr_pages(unsigned long offset) * @gfp_mask: memory allocation flags * @vmf: fault information * - * Returns the struct page for entry and addr, after queueing swapin. + * Returns the struct folio for entry and addr, after queueing swapin. * * Primitive swap readahead code. We simply read an aligned block of * (1 << page_cluster) entries in the swap area. This method is chosen @@ -630,7 +630,7 @@ static unsigned long swapin_nr_pages(unsigned long offset) * * Caller must hold read mmap_lock if vmf->vma is not NULL. 
 mm/shmem.c      |  8 +++-----
 mm/swap.h       |  4 ++--
 mm/swap_state.c | 24 +++++++++++++-----------
 3 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 3f5ba0827ecf..6fdc7144ca91 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1719,18 +1719,16 @@ static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
 			struct shmem_inode_info *info, pgoff_t index)
 {
 	struct vm_area_struct pvma;
-	struct page *page;
+	struct folio *folio;
 	struct vm_fault vmf = {
 		.vma = &pvma,
 	};
 
 	shmem_pseudo_vma_init(&pvma, info, index);
-	page = swap_cluster_readahead(swap, gfp, &vmf);
+	folio = swap_cluster_readahead(swap, gfp, &vmf);
 	shmem_pseudo_vma_destroy(&pvma);
 
-	if (!page)
-		return NULL;
-	return page_folio(page);
+	return folio;
 }
 
 /*
diff --git a/mm/swap.h b/mm/swap.h
index cf01b7cc2c60..af4dc6921b90 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -51,7 +51,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				      struct vm_area_struct *vma,
 				      unsigned long addr,
 				      bool *new_page_allocated);
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 				    struct vm_fault *vmf);
 struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
 			      struct vm_fault *vmf);
@@ -79,7 +79,7 @@ static inline void show_swap_cache_info(void)
 {
 }
 
-static inline struct page *swap_cluster_readahead(swp_entry_t entry,
+static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
 			gfp_t gfp_mask, struct vm_fault *vmf)
 {
 	return NULL;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ce8147cc4590..a6136403a812 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -618,7 +618,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  * @gfp_mask: memory allocation flags
  * @vmf: fault information
  *
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
  *
  * Primitive swap readahead code. We simply read an aligned block of
  * (1 << page_cluster) entries in the swap area. This method is chosen
@@ -630,7 +630,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  *
  * Caller must hold read mmap_lock if vmf->vma is not NULL.
  */
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 				struct vm_fault *vmf)
 {
 	struct folio *folio;
@@ -680,9 +680,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 skip:
 	/* The page was likely read above, so no need for plugging here */
-	folio = read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
-
-	return folio_file_page(folio, swp_offset(entry));
+	return read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -784,7 +782,7 @@ static void swap_ra_info(struct vm_fault *vmf,
  * @gfp_mask: memory allocation flags
  * @vmf: fault information
  *
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
  *
  * Primitive swap readahead code. We simply read in a few pages whose
  * virtual addresses are around the fault address in the same vma.
@@ -792,7 +790,7 @@ static void swap_ra_info(struct vm_fault *vmf,
  * Caller must hold read mmap_lock if vmf->vma is not NULL.
  *
  */
-static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
+static struct folio *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 				       struct vm_fault *vmf)
 {
 	struct blk_plug plug;
@@ -849,10 +847,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 	lru_add_drain();
 skip:
 	/* The page was likely read above, so no need for plugging here */
-	folio = read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
+	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
 				     NULL);
-
-	return folio_file_page(folio, swp_offset(entry));
 }
 
 /**
@@ -870,9 +866,15 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 			    struct vm_fault *vmf)
 {
-	return swap_use_vma_readahead() ?
+	struct folio *folio;
+
+	folio = swap_use_vma_readahead() ?
 			swap_vma_readahead(entry, gfp_mask, vmf) :
 			swap_cluster_readahead(entry, gfp_mask, vmf);
+
+	if (!folio)
+		return NULL;
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 #ifdef CONFIG_SYSFS
-- 
2.25.1