
From: "Matthew Wilcox (Oracle)" <willy@infradead.org> mainline inclusion from mainline-v6.8-rc1 commit 96c7b0b42239e7b8987b2664b458dc74e825f760 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IC3A3N Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i... -------------------------------- Patch series "More swap folio conversions". These all seem like fairly straightforward conversions to me. A lot of compound_head() calls get removed. And page_swap_info(), which is nice. This patch (of 13): Move the folio->page conversion into the callers that actually want that. Most of the callers are happier with the folio anyway. If the page_allocated boolean is set, the folio allocated is of order-0, so it is safe to pass the page directly to swap_readpage(). Link: https://lkml.kernel.org/r/20231213215842.671461-1-willy@infradead.org Link: https://lkml.kernel.org/r/20231213215842.671461-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Conflicts: include/linux/zswap.h mm/swap.h mm/swap_state.c mm/zswap.c [Context conflict: 1. patchset "workload-specific and memory pressure -driven zswap writeback" is not merged. 2. patchset "mempolicy: cleanups leading to NUMA mpol without vma" is not merged. 3. patchset "mm: zswap: cleanups" is not merged] Signed-off-by: Tong Tiangen <tongtiangen@huawei.com> --- mm/swap.h | 2 +- mm/swap_state.c | 50 ++++++++++++++++++++++------------------------- mm/zswap.c | 52 ++++++++++++++++++++++++------------------------- 3 files changed, 50 insertions(+), 54 deletions(-) diff --git a/mm/swap.h b/mm/swap.h index 500f99202776..1d3ba5b45298 100644 --- a/mm/swap.h +++ b/mm/swap.h @@ -48,7 +48,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr, struct swap_iocb **plug); -struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, +struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr, bool *new_page_allocated); diff --git a/mm/swap_state.c b/mm/swap_state.c index bd2bc1d4cd86..9f4f52350655 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -426,13 +426,12 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping, return folio; } -struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, +struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr, bool *new_page_allocated) { struct swap_info_struct *si; struct folio *folio; - struct page *page; void *shadow = NULL; *new_page_allocated = false; @@ -449,10 +448,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, */ folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry)); - if (!IS_ERR(folio)) { - page = folio_file_page(folio, swp_offset(entry)); - goto got_page; - } + if (!IS_ERR(folio)) + goto got_folio; /* * Just skip read ahead for unused swap slot. @@ -466,7 +463,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, goto fail_put_swap; /* - * Get a new page to read into from swap. Allocate it now, + * Get a new folio to read into from swap. Allocate it now, * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will * cause any racers to loop around until we add it to cache. 
 mm/swap.h       |  2 +-
 mm/swap_state.c | 50 ++++++++++++++++++++++-------------------
 mm/zswap.c      | 52 ++++++++++++++++++++++++-------------------------
 3 files changed, 50 insertions(+), 54 deletions(-)

diff --git a/mm/swap.h b/mm/swap.h
index 500f99202776..1d3ba5b45298 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -48,7 +48,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                   struct vm_area_struct *vma,
                                   unsigned long addr,
                                   struct swap_iocb **plug);
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                     struct vm_area_struct *vma,
                                     unsigned long addr,
                                     bool *new_page_allocated);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index bd2bc1d4cd86..9f4f52350655 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -426,13 +426,12 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
 	return folio;
 }
 
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		struct vm_area_struct *vma, unsigned long addr,
 		bool *new_page_allocated)
 {
 	struct swap_info_struct *si;
 	struct folio *folio;
-	struct page *page;
 	void *shadow = NULL;
 
 	*new_page_allocated = false;
@@ -449,10 +448,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	 */
 	folio = filemap_get_folio(swap_address_space(entry),
					swp_offset(entry));
-	if (!IS_ERR(folio)) {
-		page = folio_file_page(folio, swp_offset(entry));
-		goto got_page;
-	}
+	if (!IS_ERR(folio))
+		goto got_folio;
 
 	/*
 	 * Just skip read ahead for unused swap slot.
@@ -466,7 +463,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		goto fail_put_swap;
 
 	/*
-	 * Get a new page to read into from swap. Allocate it now,
+	 * Get a new folio to read into from swap. Allocate it now,
 	 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
 	 * cause any racers to loop around until we add it to cache.
 	 */
@@ -490,13 +487,13 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
 		 * has not yet been cleared. Or race against another
 		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
-		 * in swap_map, but not yet added its page to swap cache.
+		 * in swap_map, but not yet added its folio to swap cache.
 		 */
 		schedule_timeout_uninterruptible(1);
 	}
 
 	/*
-	 * The swap entry is ours to swap in. Prepare the new page.
+	 * The swap entry is ours to swap in. Prepare the new folio.
 	 */
 	__folio_set_locked(folio);
 
@@ -517,10 +514,9 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	/* Caller will initiate read into locked folio */
 	folio_add_lru(folio);
 	*new_page_allocated = true;
-	page = &folio->page;
-got_page:
+got_folio:
 	put_swap_device(si);
-	return page;
+	return folio;
 
 fail_unlock:
 	put_swap_folio(folio, entry);
@@ -546,13 +542,13 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		unsigned long addr, struct swap_iocb **plug)
 {
 	bool page_was_allocated;
-	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
+	struct folio *folio = __read_swap_cache_async(entry, gfp_mask,
 			vma, addr, &page_was_allocated);
 
 	if (page_was_allocated)
-		swap_readpage(retpage, false, plug);
+		swap_readpage(&folio->page, false, plug);
 
-	return retpage;
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 static unsigned int __swapin_nr_pages(unsigned long prev_offset,
@@ -637,7 +633,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 				struct vm_fault *vmf)
 {
-	struct page *page;
+	struct folio *folio;
 	unsigned long entry_offset = swp_offset(entry);
 	unsigned long offset = entry_offset;
 	unsigned long start_offset, end_offset;
@@ -664,19 +660,19 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	blk_start_plug(&plug);
 	for (offset = start_offset; offset <= end_offset ; offset++) {
 		/* Ok, do the async read-ahead now */
-		page = __read_swap_cache_async(
+		folio = __read_swap_cache_async(
 			swp_entry(swp_type(entry), offset),
 			gfp_mask, vma, addr, &page_allocated);
-		if (!page)
+		if (!folio)
 			continue;
 		if (page_allocated) {
-			swap_readpage(page, false, &splug);
+			swap_readpage(&folio->page, false, &splug);
 			if (offset != entry_offset) {
-				SetPageReadahead(page);
+				folio_set_readahead(folio);
 				count_vm_event(SWAP_RA);
 			}
 		}
-		put_page(page);
+		folio_put(folio);
 	}
 	blk_finish_plug(&plug);
 	swap_read_unplug(splug);
@@ -800,7 +796,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 	struct blk_plug plug;
 	struct swap_iocb *splug = NULL;
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *page;
+	struct folio *folio;
 	pte_t *pte = NULL, pentry;
 	unsigned long addr;
 	swp_entry_t entry;
@@ -831,18 +827,18 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 			continue;
 		pte_unmap(pte);
 		pte = NULL;
-		page = __read_swap_cache_async(entry, gfp_mask, vma,
+		folio = __read_swap_cache_async(entry, gfp_mask, vma,
 					       addr, &page_allocated);
-		if (!page)
+		if (!folio)
 			continue;
 		if (page_allocated) {
-			swap_readpage(page, false, &splug);
+			swap_readpage(&folio->page, false, &splug);
 			if (i != ra_info.offset) {
-				SetPageReadahead(page);
+				folio_set_readahead(folio);
 				count_vm_event(SWAP_RA);
 			}
 		}
-		put_page(page);
+		folio_put(folio);
 	}
 	if (pte)
 		pte_unmap(pte);
diff --git a/mm/zswap.c b/mm/zswap.c
index 69681b9173fd..a2e3141bc985 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1041,14 +1041,14 @@ static int zswap_enabled_param_set(const char *val,
 * writeback code
 **********************************/
 /*
- * Attempts to free an entry by adding a page to the swap cache,
- * decompressing the entry data into the page, and issuing a
- * bio write to write the page back to the swap device.
+ * Attempts to free an entry by adding a folio to the swap cache,
+ * decompressing the entry data into the folio, and issuing a
+ * bio write to write the folio back to the swap device.
  *
- * This can be thought of as a "resumed writeback" of the page
+ * This can be thought of as a "resumed writeback" of the folio
  * to the swap device. We are basically resuming the same swap
  * writeback path that was intercepted with the zswap_store()
- * in the first place. After the page has been decompressed into
+ * in the first place. After the folio has been decompressed into
  * the swap cache, the compressed version stored by zswap can be
  * freed.
  */
@@ -1056,11 +1056,11 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 				 struct zswap_tree *tree)
 {
 	swp_entry_t swpentry = entry->swpentry;
-	struct page *page;
+	struct folio *folio;
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
 	struct zpool *pool = zswap_find_zpool(entry);
-	bool page_was_allocated;
+	bool folio_was_allocated;
 	u8 *src, *tmp = NULL;
 	unsigned int dlen;
 	int ret;
@@ -1074,34 +1074,34 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 			return -ENOMEM;
 	}
 
-	/* try to allocate swap cache page */
-	page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
-				       &page_was_allocated);
-	if (!page) {
+	/* try to allocate swap cache folio */
+	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
+				       &folio_was_allocated);
+	if (!folio) {
 		ret = -ENOMEM;
 		goto fail;
 	}
 
-	/* Found an existing page, we raced with load/swapin */
-	if (!page_was_allocated) {
-		put_page(page);
+	/* Found an existing folio, we raced with load/swapin */
+	if (!folio_was_allocated) {
+		folio_put(folio);
 		ret = -EEXIST;
 		goto fail;
 	}
 
 	/*
-	 * Page is locked, and the swapcache is now secured against
+	 * folio is locked, and the swapcache is now secured against
 	 * concurrent swapping to and from the slot. Verify that the
 	 * swap entry hasn't been invalidated and recycled behind our
 	 * backs (our zswap_entry reference doesn't prevent that), to
-	 * avoid overwriting a new swap page with old compressed data.
+	 * avoid overwriting a new swap folio with old compressed data.
 	 */
 	spin_lock(&tree->lock);
 	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
 		spin_unlock(&tree->lock);
-		delete_from_swap_cache(page_folio(page));
-		unlock_page(page);
-		put_page(page);
+		delete_from_swap_cache(folio);
+		folio_unlock(folio);
+		folio_put(folio);
 		ret = -ENOMEM;
 		goto fail;
 	}
@@ -1121,7 +1121,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	mutex_lock(acomp_ctx->mutex);
 	sg_init_one(&input, src, entry->length);
 	sg_init_table(&output, 1);
-	sg_set_page(&output, page, PAGE_SIZE, 0);
+	sg_set_page(&output, &folio->page, PAGE_SIZE, 0);
 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
 	ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
 	dlen = acomp_ctx->req->dlen;
@@ -1135,15 +1135,15 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	BUG_ON(ret);
 	BUG_ON(dlen != PAGE_SIZE);
 
-	/* page is up to date */
-	SetPageUptodate(page);
+	/* folio is up to date */
+	folio_mark_uptodate(folio);
 
 	/* move it to the tail of the inactive list after end_writeback */
-	SetPageReclaim(page);
+	folio_set_reclaim(folio);
 
 	/* start writeback */
-	__swap_writepage(page, &wbc);
-	put_page(page);
+	__swap_writepage(&folio->page, &wbc);
+	folio_put(folio);
 	zswap_written_back_pages++;
 
 	return ret;
@@ -1294,7 +1294,7 @@ bool zswap_store(struct folio *folio)
 
 	dst = acomp_ctx->dstmem;
 	sg_init_table(&input, 1);
-	sg_set_page(&input, page, PAGE_SIZE, 0);
+	sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
 
 	/* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
 	sg_init_one(&output, dst, PAGE_SIZE * 2);
-- 
2.25.1