hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CDZZ
--------------------------------
With the dynamic pool feature, some memory is isolated in the dynamic pool. When compacting memory, the kcompactd thread scans all memory; although some pages belong to the dynamic pool, kcompactd still tries to migrate them. After migration, these pages are freed back to the dynamic pool rather than to the buddy system, which causes the number of free pages in the buddy system to decrease.
Since it is unnecessary to compact memory that lives in the dynamic pool, skip migrating such pages to fix the problem.
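For illustration only, here is a minimal sketch of the property that page_in_dynamic_pool() is expected to report. The real helper is declared in <linux/dynamic_pool.h> and is not part of this diff; dpool_enabled and page_to_dpool() below are hypothetical names used only for the sketch.

	/*
	 * Sketch, not the real implementation: answers "is this page
	 * currently owned by a dynamic pool instead of the buddy system?".
	 * Every call site in this patch skips or refuses such pages, so
	 * pages freed after migration cannot silently leave the buddy
	 * system.  Assumes <linux/mm.h> for struct page.
	 */
	static inline bool page_in_dynamic_pool_sketch(struct page *page)
	{
		if (!dpool_enabled)		/* hypothetical global switch */
			return false;

		/* hypothetical reverse lookup from a page to its pool */
		return page_to_dpool(page) != NULL;
	}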
The same problem also exists in alloc_contig_range(), offline_pages() and NUMA balancing. Skip dynamic pool pages in these three scenarios as well.
In addition, the migration of hugepages has to be considered: if a hugepage comes from the dynamic pool, it must not be migrated either.
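For readability, the mm/migrate.c hunk below boils down to the following shape in alloc_migration_target() (shown without diff context; this is only a restatement of the change, not extra code):

	if (folio_test_hugetlb(src)) {
		struct hstate *h = folio_hstate(src);

		/*
		 * A hugetlb folio from a dynamic pool gets no migration
		 * target, so migrate_pages() leaves it in place.
		 */
		if (page_in_dynamic_pool(folio_page(src, 0)))
			return NULL;

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_hugetlb_folio_nodemask(h, nid, mtc->nmask, gfp_mask);
	}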
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 mm/compaction.c     | 4 ++++
 mm/migrate.c        | 7 +++++++
 mm/page_isolation.c | 9 +++++++--
 3 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 771e9629b95c..e9fe6777c8a3 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -23,6 +23,7 @@
 #include <linux/freezer.h>
 #include <linux/page_owner.h>
 #include <linux/psi.h>
+#include <linux/dynamic_pool.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -2024,6 +2025,9 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 			continue;
 		}
 
+		if (page_in_dynamic_pool(page))
+			continue;
+
 		/*
 		 * If isolation recently failed, do not retry. Only check the
 		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
diff --git a/mm/migrate.c b/mm/migrate.c
index 46bed84d5438..16e7e3e60d2a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -50,6 +50,7 @@
 #include <linux/random.h>
 #include <linux/sched/sysctl.h>
 #include <linux/memory-tiers.h>
+#include <linux/dynamic_pool.h>
 
 #include <asm/tlbflush.h>
 
@@ -1999,6 +2000,9 @@ struct folio *alloc_migration_target(struct folio *src, unsigned long private)
 	if (folio_test_hugetlb(src)) {
 		struct hstate *h = folio_hstate(src);
 
+		if (page_in_dynamic_pool(folio_page(src, 0)))
+			return NULL;
+
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
 		return alloc_hugetlb_folio_nodemask(h, nid,
 						mtc->nmask, gfp_mask);
@@ -2507,6 +2511,9 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
 		return 0;
 	}
 
+	if (page_in_dynamic_pool(folio_page(folio, 0)))
+		return 0;
+
 	if (!folio_isolate_lru(folio))
 		return 0;
 
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index bcf99ba747a0..fefc8a926944 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -10,6 +10,7 @@
 #include <linux/hugetlb.h>
 #include <linux/page_owner.h>
 #include <linux/migrate.h>
+#include <linux/dynamic_pool.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -329,6 +330,9 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
 	start_pfn = max(ALIGN_DOWN(isolate_pageblock, MAX_ORDER_NR_PAGES),
 				      zone->zone_start_pfn);
 
+	if (page_in_dynamic_pool(pfn_to_page(isolate_pageblock)))
+		return -EBUSY;
+
 	if (skip_isolation) {
 		int mt __maybe_unused = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
 
@@ -558,8 +562,9 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	     pfn < isolate_end - pageblock_nr_pages;
 	     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
-		if (page && set_migratetype_isolate(page, migratetype, flags,
-					start_pfn, end_pfn)) {
+		if (page && (page_in_dynamic_pool(page) ||
+		    set_migratetype_isolate(page, migratetype, flags,
+					start_pfn, end_pfn))) {
 			undo_isolate_page_range(isolate_start, pfn, migratetype);
 			unset_migratetype_isolate(
 				pfn_to_page(isolate_end - pageblock_nr_pages),