From: Baolin Wang <baolin.wang@linux.alibaba.com>
mainline inclusion
from mainline-v6.12-rc1
commit 7de856ffd007f132fd4a0474b289a622a3f88cd7
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IAXCD2
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Shmem already supports the allocation of mTHP, but khugepaged does not yet support collapsing mTHP folios. Now khugepaged is ready to support mTHP collapse, and this patch enables the collapse of shmem mTHP.
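The core of the change is the page-cache walk in collapse_file(): instead of stepping the index one entry at a time and bailing out on any large folio, the loop now advances by the number of pages in each folio it accumulates, so mTHP folios are queued for collapse just like order-0 pages. As a rough sketch of the resulting iteration pattern (abridged from the diff below; locking, error handling, and most scan checks omitted):

	for (index = start; index < end;) {
		folio = xas_load(&xas);
		if (!folio) {
			/* Hole in the page cache: counted, step one entry. */
			nr_none++;
			index++;
			continue;
		}
		/* A PMD-order folio at 'start' may already be PMD-mapped. */
		if (folio_order(folio) == HPAGE_PMD_ORDER &&
		    folio->index == start) {
			result = SCAN_PTE_MAPPED_HUGEPAGE;
			break;
		}
		/* Order-0 and mTHP folios alike are queued for collapse. */
		list_add_tail(&folio->lru, &pagelist);
		index += folio_nr_pages(folio);
	}

Because the index now moves in folio-sized steps, the SCAN_PAGE_COMPOUND bailout for large folios is no longer needed; only the possibly already PMD-mapped case must still be skipped.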
Link: https://lkml.kernel.org/r/b9da76aab4276eb6e5d12c479af2b5eea5b4575d.172414060...
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Nanyong Sun <sunnanyong@huawei.com>
---
 mm/khugepaged.c | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 3337b79f51ae..b5922ed5a745 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1857,7 +1857,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		}
 	} while (1);
 
-	for (index = start; index < end; index++) {
+	for (index = start; index < end;) {
 		xas_set(&xas, index);
 		folio = xas_load(&xas);
 
@@ -1876,6 +1876,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 				}
 			}
 			nr_none++;
+			index++;
 			continue;
 		}
 
@@ -1957,12 +1958,10 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		 * we locked the first folio, then a THP might be there already.
 		 * This will be discovered on the first iteration.
 		 */
-		if (folio_test_large(folio)) {
-			result = folio_order(folio) == HPAGE_PMD_ORDER &&
-					folio->index == start
-					/* Maybe PMD-mapped */
-					? SCAN_PTE_MAPPED_HUGEPAGE
-					: SCAN_PAGE_COMPOUND;
+		if (folio_order(folio) == HPAGE_PMD_ORDER &&
+		    folio->index == start) {
+			/* Maybe PMD-mapped */
+			result = SCAN_PTE_MAPPED_HUGEPAGE;
 			goto out_unlock;
 		}
 
@@ -2023,6 +2022,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		 * Accumulate the folios that are being collapsed.
 		 */
 		list_add_tail(&folio->lru, &pagelist);
+		index += folio_nr_pages(folio);
 		continue;
 out_unlock:
 		folio_unlock(folio);
@@ -2276,16 +2276,10 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 			continue;
 		}
 
-		/*
-		 * TODO: khugepaged should compact smaller compound pages
-		 * into a PMD sized page
-		 */
-		if (folio_test_large(folio)) {
-			result = folio_order(folio) == HPAGE_PMD_ORDER &&
-					folio->index == start
-					/* Maybe PMD-mapped */
-					? SCAN_PTE_MAPPED_HUGEPAGE
-					: SCAN_PAGE_COMPOUND;
+		if (folio_order(folio) == HPAGE_PMD_ORDER &&
+		    folio->index == start) {
+			/* Maybe PMD-mapped */
+			result = SCAN_PTE_MAPPED_HUGEPAGE;
 			/*
 			 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
 			 * by the caller won't touch the page cache, and so