Fix unexpected migration of pages from dynamic hugetlb pool.
Liu Shixin (2): dhugetlb: introduce page_belong_to_dynamic_hugetlb() function dhugetlb: skip unexpected migration
include/linux/hugetlb.h | 5 +++++ include/linux/migrate.h | 6 +++++- mm/compaction.c | 3 +++ mm/hugetlb.c | 16 +++++++++++----- mm/mempolicy.c | 10 ++++++++-- mm/migrate.c | 3 +++ mm/page_isolation.c | 3 ++- 7 files changed, 37 insertions(+), 9 deletions(-)
反馈: 您发送到kernel@openeuler.org的补丁/补丁集,已成功转换为PR! PR链接地址: https://gitee.com/openeuler/kernel/pulls/4209 邮件列表地址:https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/T...
FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/4209 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/T...
hulk inclusion category: cleanup bugzilla: https://gitee.com/openeuler/kernel/issues/I8YWPT CVE: NA
--------------------------------
Use the page_belong_to_dynamic_hugetlb() helper to check whether a page is from the dynamic hugetlb pool.
No functional change.
Signed-off-by: Liu Shixin liushixin2@huawei.com --- include/linux/hugetlb.h | 5 +++++ mm/hugetlb.c | 16 +++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 868aea82db2d..869371c3da05 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -745,6 +745,7 @@ void move_pages_from_smpool_to_hpool(struct dhugetlb_pool *hpool, struct small_page_pool *smpool); void dhugetlb_reserve_hugepages(struct dhugetlb_pool *hpool, unsigned long count, bool gigantic); +bool page_belong_to_dynamic_hugetlb(struct page *page); #else #define enable_dhugetlb 0 #define dhugetlb_enabled 0 @@ -760,6 +761,10 @@ static inline struct dhugetlb_pool *get_dhugetlb_pool_from_dhugetlb_pagelist( return NULL; } static inline void dhugetlb_pool_put(struct dhugetlb_pool *hpool) { return; } +static inline bool page_belong_to_dynamic_hugetlb(struct page *page) +{ + return false; +} #endif /* CONFIG_DYNAMIC_HUGETLB */
static inline spinlock_t *huge_pte_lock(struct hstate *h, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 79d5d2cab271..04caf77c51d7 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1762,7 +1762,6 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, int dissolve_free_huge_page(struct page *page) { int rc = -EBUSY; - struct dhugetlb_pool *hpool;
retry: /* Not to disrupt normal path by vainly holding hugetlb_lock */ @@ -1770,11 +1769,8 @@ int dissolve_free_huge_page(struct page *page) return 0;
/* Skip dissolve hugepage for dynamic hugetlb */ - hpool = get_dhugetlb_pool_from_dhugetlb_pagelist(page); - if (hpool) { - dhugetlb_pool_put(hpool); + if (page_belong_to_dynamic_hugetlb(page)) return -EBUSY; - }
spin_lock(&hugetlb_lock); if (!PageHuge(page)) { @@ -4210,6 +4206,16 @@ static int dhugetlb_acct_memory(struct hstate *h, long delta,
return ret; } + +bool page_belong_to_dynamic_hugetlb(struct page *page) +{ + struct dhugetlb_pool *hpool; + + hpool = get_dhugetlb_pool_from_dhugetlb_pagelist(page); + dhugetlb_pool_put(hpool); + + return !!hpool; +} #else static int dhugetlb_acct_memory(struct hstate *h, long delta, struct dhugetlb_pool *hpool)
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8YWPT CVE: NA
--------------------------------
With the dynamic hugetlb feature, some memory is isolated in the dynamic pool. When trying to compact memory, the kcompactd thread will scan all memory; although some of that memory belongs to the dynamic pool, kcompactd still tries to migrate it. After migration, this memory is freed to the dynamic pool rather than to the buddy system, which results in a decrease of free pages in the buddy system.
Since it is unnecessary to compact the memory in the dynamic pool, skip migrating it to fix the problem.
The same problem also exists in alloc_contig_range(), offline_pages() and NUMA balancing. Skip the migration in these three scenarios as well.
In addition, we have to consider the migration of hugepages: if a hugepage is from the dynamic pool, we should not allow it to be migrated.
Fixes: 0bc0d0d57eda ("dhugetlb: backport dynamic hugetlb feature") Signed-off-by: Liu Shixin liushixin2@huawei.com --- include/linux/migrate.h | 6 +++++- mm/compaction.c | 3 +++ mm/mempolicy.c | 10 ++++++++-- mm/migrate.c | 3 +++ mm/page_isolation.c | 3 ++- 5 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index f2b4abbca55e..dc1df7b085cd 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -38,9 +38,13 @@ static inline struct page *new_page_nodemask(struct page *page, unsigned int order = 0; struct page *new_page = NULL;
- if (PageHuge(page)) + if (PageHuge(page)) { + if (page_belong_to_dynamic_hugetlb(page)) + return NULL; + return alloc_huge_page_nodemask(page_hstate(compound_head(page)), preferred_nid, nodemask); + }
if (PageTransHuge(page)) { gfp_mask |= GFP_TRANSHUGE; diff --git a/mm/compaction.c b/mm/compaction.c index 1d991e443322..f45a057a0e64 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1271,6 +1271,9 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, if (!page) continue;
+ if (page_belong_to_dynamic_hugetlb(page)) + continue; + /* If isolation recently failed, do not retry */ if (!isolation_suitable(cc, page)) continue; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 701988dc02f6..e8c82f3235e2 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1040,10 +1040,13 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist, /* page allocation callback for NUMA node migration */ struct page *alloc_new_node_page(struct page *page, unsigned long node) { - if (PageHuge(page)) + if (PageHuge(page)) { + if (page_belong_to_dynamic_hugetlb(page)) + return NULL; + return alloc_huge_page_node(page_hstate(compound_head(page)), node); - else if (PageTransHuge(page)) { + } else if (PageTransHuge(page)) { struct page *thp;
thp = alloc_pages_node(node, @@ -1217,6 +1220,9 @@ static struct page *new_page(struct page *page, unsigned long start) }
if (PageHuge(page)) { + if (page_belong_to_dynamic_hugetlb(page)) + return NULL; + return alloc_huge_page_vma(page_hstate(compound_head(page)), vma, address); } else if (PageTransHuge(page)) { diff --git a/mm/migrate.c b/mm/migrate.c index 56a2033d443c..dbe174f86cfd 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1907,6 +1907,9 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) return 0;
+ if (page_belong_to_dynamic_hugetlb(page)) + return 0; + if (isolate_lru_page(page)) return 0;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 49b50cef0101..05f90155d5be 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -220,7 +220,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, pfn += pageblock_nr_pages) { page = __first_valid_page(pfn, pageblock_nr_pages); if (page) { - if (set_migratetype_isolate(page, migratetype, flags)) { + if (page_belong_to_dynamic_hugetlb(page) || + set_migratetype_isolate(page, migratetype, flags)) { undo_pfn = pfn; goto undo; }