hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAHJKC
CVE: NA
--------------------------------
Sometimes migrating a THP is not beneficial: for example, when the 64K page
size is set on ARM64, a THP is 512M, and migrating it may result in a
performance regression. This feature adds a switch that controls whether THP
is migrated during NUMA balancing. It is on by default and can be disabled
with:

echo 0 > /sys/kernel/mm/transparent_hugepage/numa_migrate
When it is disabled, migrate_misplaced_page() fails for THP, and the task
itself still has a chance to migrate instead.
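For example (an illustrative session; the sysfs file reports the flag as 1
or 0 and accepts the same two values to store):

  cat /sys/kernel/mm/transparent_hugepage/numa_migrate
  1
  echo 1 > /sys/kernel/mm/transparent_hugepage/numa_migrate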
Signed-off-by: Nanyong Sun <sunnanyong@huawei.com>
---
 Documentation/admin-guide/mm/transhuge.rst |  5 +++++
 arch/arm64/Kconfig                         |  1 +
 include/linux/huge_mm.h                    | 14 +++++++++++++
 mm/Kconfig                                 | 10 ++++++++++
 mm/huge_memory.c                           | 23 ++++++++++++++++++++++
 mm/migrate.c                               |  3 +++
 6 files changed, 56 insertions(+)
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 2bfb380e8380..97be39a0d5f5 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -160,6 +160,11 @@ library) may want to know the size (in bytes) of a transparent hugepage::
 	cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size

+THP migration during NUMA balancing can be disabled, when
+CONFIG_NUMA_MIGRATE_THP_CONTROL is enabled, by::
+
+	echo 0 > /sys/kernel/mm/transparent_hugepage/numa_migrate
+
 khugepaged will be automatically started when
 transparent_hugepage/enabled is set to "always" or "madvise", and it'll
 be automatically shutdown if it's set to "never".

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index cae54a9bf65d..b3d0c391a570 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -216,6 +216,7 @@ config ARM64
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
 	select HAVE_LIVEPATCH_WO_FTRACE
+	select NUMA_MIGRATE_THP_CONTROL if ARM64_64K_PAGES
 	help
 	  ARM 64-bit (AArch64) Linux support.
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index efb370e79ac3..5442889245ef 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -90,6 +90,9 @@ enum transparent_hugepage_flag {
 #ifdef CONFIG_DEBUG_VM
 	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
 #endif
+#ifdef CONFIG_NUMA_MIGRATE_THP_CONTROL
+	TRANSPARENT_HUGEPAGE_NUMA_MIGRATE_ENABLE_FLAG,
+#endif
 };

 struct kobject;
@@ -181,6 +184,14 @@ bool transparent_hugepage_active(struct vm_area_struct *vma);
 	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

+#ifdef CONFIG_NUMA_MIGRATE_THP_CONTROL
+#define thp_can_numa_migrate()					\
+	(transparent_hugepage_flags &				\
+	 (1<<TRANSPARENT_HUGEPAGE_NUMA_MIGRATE_ENABLE_FLAG))
+#else
+#define thp_can_numa_migrate() 1
+#endif
+
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags);
@@ -485,6 +496,9 @@ static inline bool thp_migration_supported(void)
 {
 	return false;
 }
+
+#define thp_can_numa_migrate() 0
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 /**
diff --git a/mm/Kconfig b/mm/Kconfig
index ccbad233f2b1..311b54078307 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1038,6 +1038,16 @@ config NUMABALANCING_MEM_SAMPLING
 	  if unsure, say N to disable the NUMABALANCING_MEM_SAMPLING.

+config NUMA_MIGRATE_THP_CONTROL
+	bool "Control THP migration when numa balancing"
+	depends on NUMA_BALANCING && TRANSPARENT_HUGEPAGE
+	default n
+	help
+	  Sometimes migrating a THP is not beneficial, for example, when the
+	  64K page size is used on ARM64 a THP is 512M, so migration will be
+	  expensive. This feature adds a switch to control the behavior of
+	  THP migration when doing NUMA balancing.
+
 source "mm/damon/Kconfig"

 endmenu
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index eb293d17a104..c02ffcbad96b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -56,6 +56,9 @@ unsigned long transparent_hugepage_flags __read_mostly =
 #endif
 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
+#ifdef CONFIG_NUMA_MIGRATE_THP_CONTROL
+	(1<<TRANSPARENT_HUGEPAGE_NUMA_MIGRATE_ENABLE_FLAG)|
+#endif
 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

 static struct shrinker deferred_split_shrinker;
@@ -316,6 +319,23 @@ static ssize_t hpage_pmd_size_show(struct kobject *kobj,
 static struct kobj_attribute hpage_pmd_size_attr =
 	__ATTR_RO(hpage_pmd_size);

+#ifdef CONFIG_NUMA_MIGRATE_THP_CONTROL
+static ssize_t numa_migrate_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return single_hugepage_flag_show(kobj, attr, buf,
+			TRANSPARENT_HUGEPAGE_NUMA_MIGRATE_ENABLE_FLAG);
+}
+static ssize_t numa_migrate_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	return single_hugepage_flag_store(kobj, attr, buf, count,
+			TRANSPARENT_HUGEPAGE_NUMA_MIGRATE_ENABLE_FLAG);
+}
+static struct kobj_attribute numa_migrate_attr =
+	__ATTR(numa_migrate, 0644, numa_migrate_show, numa_migrate_store);
+#endif
+
 static struct attribute *hugepage_attr[] = {
 	&enabled_attr.attr,
 	&defrag_attr.attr,
@@ -323,6 +343,9 @@ static struct attribute *hugepage_attr[] = {
 	&hpage_pmd_size_attr.attr,
 #ifdef CONFIG_SHMEM
 	&shmem_enabled_attr.attr,
+#endif
+#ifdef CONFIG_NUMA_MIGRATE_THP_CONTROL
+	&numa_migrate_attr.attr,
 #endif
 	NULL,
 };
diff --git a/mm/migrate.c b/mm/migrate.c
index 3f5b217d5af1..f7eb9b4c8668 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2157,6 +2157,9 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	 */
 	compound = PageTransHuge(page);

+	if (compound && !thp_can_numa_migrate())
+		return 0;
+
 	if (compound)
 		new = alloc_misplaced_dst_page_thp;
 	else