hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IAF8L3
--------------------------------
To skip unexpected migration on dynamic pool, we should also check whether the page is allocated from the dynamic pool via the existing page_from_dynamic_pool(). Add a page_from_or_in_dynamic_pool() helper that combines both checks to simplify the callers.
Fixes: 9532dabee631 ("mm/dynamic_pool: skip unexpected migration") Signed-off-by: Liu Shixin liushixin2@huawei.com --- include/linux/dynamic_pool.h | 10 ++++++++++ mm/compaction.c | 2 +- mm/hugetlb.c | 4 ++-- mm/migrate.c | 4 ++-- mm/page_isolation.c | 4 ++-- 5 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/include/linux/dynamic_pool.h b/include/linux/dynamic_pool.h index 3b54c384ec5a..1d12d76405cf 100644 --- a/include/linux/dynamic_pool.h +++ b/include/linux/dynamic_pool.h @@ -103,6 +103,11 @@ static inline bool file_in_dynamic_pool(struct hugetlbfs_inode_info *p) }
bool page_in_dynamic_pool(struct page *page); +static inline bool page_from_or_in_dynamic_pool(struct page *page) +{ + return page_from_dynamic_pool(page) || page_in_dynamic_pool(page); +} + int dynamic_pool_can_attach(struct task_struct *tsk, struct mem_cgroup *memcg); struct page *dynamic_pool_alloc_page(gfp_t gfp, unsigned int order, unsigned int alloc_flags); @@ -150,6 +155,11 @@ static inline bool page_in_dynamic_pool(const struct page *page) return false; }
+static inline bool page_from_or_in_dynamic_pool(struct page *page) +{ + return false; +} + static inline int dynamic_pool_can_attach(struct task_struct *tsk, struct mem_cgroup *memcg) { diff --git a/mm/compaction.c b/mm/compaction.c index 09424cb5418f..e2735752c374 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2135,7 +2135,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) continue; }
- if (page_in_dynamic_pool(page)) + if (page_from_or_in_dynamic_pool(page)) continue;
/* diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 360ac804e68d..57c81f87d474 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2368,7 +2368,7 @@ int dissolve_free_huge_page(struct page *page) if (!folio_test_hugetlb(folio)) return 0;
- if (page_from_dynamic_pool(page) || page_in_dynamic_pool(page)) + if (page_from_or_in_dynamic_pool(page)) return -EBUSY;
spin_lock_irq(&hugetlb_lock); @@ -3079,7 +3079,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) struct folio *folio = page_folio(page); int ret = -EBUSY;
- if (page_from_dynamic_pool(page) || page_in_dynamic_pool(page)) + if (page_from_or_in_dynamic_pool(page)) return -EBUSY;
/* diff --git a/mm/migrate.c b/mm/migrate.c index 78c5b4aaf60d..b5d9d8feacfa 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2050,7 +2050,7 @@ struct folio *alloc_migration_target(struct folio *src, unsigned long private) if (folio_test_hugetlb(src)) { struct hstate *h = folio_hstate(src);
- if (page_in_dynamic_pool(folio_page(src, 0))) + if (page_from_dynamic_pool(folio_page(src, 0))) return NULL;
gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); @@ -2569,7 +2569,7 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio) return 0; }
- if (page_in_dynamic_pool(folio_page(folio, 0))) + if (page_from_dynamic_pool(folio_page(folio, 0))) return 0;
if (!folio_isolate_lru(folio)) diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 09eb445cfde9..03381be87b28 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -330,7 +330,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags, start_pfn = max(ALIGN_DOWN(isolate_pageblock, MAX_ORDER_NR_PAGES), zone->zone_start_pfn);
- if (page_in_dynamic_pool(pfn_to_page(isolate_pageblock))) + if (page_from_or_in_dynamic_pool(pfn_to_page(isolate_pageblock))) return -EBUSY;
if (skip_isolation) { @@ -562,7 +562,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, pfn < isolate_end - pageblock_nr_pages; pfn += pageblock_nr_pages) { page = __first_valid_page(pfn, pageblock_nr_pages); - if (page && (page_in_dynamic_pool(page) || + if (page && (page_from_or_in_dynamic_pool(page) || set_migratetype_isolate(page, migratetype, flags, start_pfn, end_pfn))) { undo_isolate_page_range(isolate_start, pfn, migratetype);