From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8USBA
CVE: NA
--------------------------------
When khugepaged collapses a range of pages into a huge page, the huge page should come from the same kind of memory region as the pages it replaces. Before allocating the huge page, khugepaged therefore checks whether the range to be collapsed contains any reliable pages. If it does, the huge page is allocated from the mirrored (reliable) region; otherwise it is allocated from the non-mirrored region.
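The decision path can be illustrated with the self-contained C model below. It is only a userspace sketch of the pattern this patch implements, not kernel code: the structs, page_reliable() and the GFP constants are simplified stand-ins for their kernel counterparts, and the scan loop is reduced to an array walk.

  #include <stdbool.h>
  #include <stdio.h>

  #define GFP_TRANSHUGE 0x1u
  #define GFP_RELIABLE  0x2u   /* stand-in for the mirrored-region gfp flag */

  struct page { bool reliable; };           /* model: one bit instead of zone checks */
  struct collapse_control { bool reliable; };

  /* Model of page_reliable(): true if the page lives in mirrored memory. */
  static bool page_reliable(const struct page *page)
  {
          return page->reliable;
  }

  /*
   * Model of the scan phase: cc->reliable is cleared at the start of each
   * scan and set as soon as one page in the candidate range is reliable.
   */
  static void scan_range(struct collapse_control *cc,
                         const struct page *pages, int nr)
  {
          cc->reliable = false;
          for (int i = 0; i < nr; i++)
                  if (page_reliable(&pages[i]))
                          cc->reliable = true;
  }

  /* Model of the allocation phase: widen the gfp mask if the range was reliable. */
  static unsigned int collapse_gfp(const struct collapse_control *cc)
  {
          unsigned int gfp = GFP_TRANSHUGE;

          if (cc->reliable)
                  gfp |= GFP_RELIABLE;
          return gfp;
  }

  int main(void)
  {
          struct page range[4] = { { false }, { true }, { false }, { false } };
          struct collapse_control cc;

          scan_range(&cc, range, 4);
          printf("gfp = %#x\n", collapse_gfp(&cc));
          return 0;
  }

With the sample range above the model prints gfp = 0x3, i.e. the huge page would be requested from the mirrored region because one of the scanned pages is reliable.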
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 mm/khugepaged.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 128d854dad8e..e28b79d02f31 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -95,6 +95,8 @@ static struct kmem_cache *mm_slot_cache __read_mostly;
 struct collapse_control {
 	bool is_khugepaged;
+	/* alloc hugepage from reliable zone */
+	bool reliable;
 	/* Num pages scanned per node */
 	u32 node_load[MAX_NUMNODES];
@@ -825,6 +827,7 @@ static void khugepaged_alloc_sleep(void)
 struct collapse_control khugepaged_collapse_control = {
 	.is_khugepaged = true,
+	.reliable = false,
 };
 static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
@@ -1063,6 +1066,9 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 	int node = hpage_collapse_find_target_node(cc);
 	struct folio *folio;
+	if (cc->reliable)
+		gfp |= GFP_RELIABLE;
+
 	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
@@ -1260,6 +1266,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	memset(cc->node_load, 0, sizeof(cc->node_load));
 	nodes_clear(cc->alloc_nmask);
+	cc->reliable = false;
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!pte) {
 		result = SCAN_PMD_NULL;
@@ -1384,6 +1391,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
 			referenced++;
+
+		if (page_reliable(page))
+			cc->reliable = true;
 	}
 	if (!writable) {
 		result = SCAN_PAGE_RO;
@@ -2227,6 +2237,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 	swap = 0;
 	memset(cc->node_load, 0, sizeof(cc->node_load));
 	nodes_clear(cc->alloc_nmask);
+	cc->reliable = false;
 	rcu_read_lock();
 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
 		if (xas_retry(&xas, page))
@@ -2294,6 +2305,9 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 			xas_pause(&xas);
 			cond_resched_rcu();
 		}
+
+		if (page_reliable(page))
+			cc->reliable = true;
 	}
 	rcu_read_unlock();
@@ -2739,6 +2753,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		mmap_assert_locked(mm);
 		memset(cc->node_load, 0, sizeof(cc->node_load));
 		nodes_clear(cc->alloc_nmask);
+		cc->reliable = false;
 		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
 			struct file *file = get_file(vma->vm_file);
 			pgoff_t pgoff = linear_page_index(vma, addr);