hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAKLTF

--------------------------------
Add an mTHP control to sysfs to allow user space to disable file-backed
large folios. For now, the control can be set to either `always` or
`never` to enable or disable them. By default, large folios are enabled
if the filesystem supports them.
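For example (the bracketed value marks the current setting, as emitted
by file_enabled_show() below):

	# cat /sys/kernel/mm/transparent_hugepage/file_enabled
	[always] never
	# echo never > /sys/kernel/mm/transparent_hugepage/file_enabled
	# cat /sys/kernel/mm/transparent_hugepage/file_enabled
	always [never]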
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 Documentation/admin-guide/mm/transhuge.rst |  6 ++++
 include/linux/huge_mm.h                    |  7 ++++
 mm/filemap.c                               |  2 ++
 mm/huge_memory.c                           | 38 +++++++++++++++++++++-
 mm/readahead.c                             |  2 ++
 5 files changed, 54 insertions(+), 1 deletion(-)
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index f9dc42f4451f5..f25381671dec2 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -232,6 +232,12 @@ it back by writing 0::
 	echo 0 >/sys/kernel/mm/transparent_hugepage/pcp_allow_high_order
 	echo 4 >/sys/kernel/mm/transparent_hugepage/pcp_allow_high_order
+The kernel can enable or disable file-backed hugepages, which has no
+effect on the existing pagecache::
+
+	echo always >/sys/kernel/mm/transparent_hugepage/file_enabled
+	echo never >/sys/kernel/mm/transparent_hugepage/file_enabled
+
 khugepaged will be automatically started when PMD-sized THP is enabled
 (either of the per-size anon control or the top-level control are set to
 "always" or "madvise"), and it'll be automatically shutdown when
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1474fd9c63ad2..5122ecbcf4597 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -51,6 +51,7 @@ enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
 	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
+	TRANSPARENT_HUGEPAGE_FILE_MTHP_FLAG,
 	TRANSPARENT_HUGEPAGE_FILE_EXEC_THP_FLAG,
 	TRANSPARENT_HUGEPAGE_FILE_EXEC_MTHP_FLAG,
 	TRANSPARENT_HUGEPAGE_FILE_MAPPING_ALIGN_FLAG,
@@ -308,6 +309,10 @@ static inline void count_mthp_stat(int order, enum mthp_stat_item item)
 	(transparent_hugepage_flags &				\
 	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
+#define file_mthp_enabled()					\
+	(transparent_hugepage_flags &				\
+	 (1<<TRANSPARENT_HUGEPAGE_FILE_MTHP_FLAG))
+
 #define thp_anon_mapping_pmd_align()				\
 	(transparent_hugepage_flags &				\
 	 (1<<TRANSPARENT_HUGEPAGE_ANON_MAPPING_PMD_ALIGN_FLAG))
@@ -465,6 +470,8 @@ static inline void folio_prep_large_rmappable(struct folio *folio) {}

 #define transparent_hugepage_flags 0UL

+#define file_mthp_enabled() false
+
 #define thp_anon_mapping_pmd_align() NULL

 #define thp_get_unmapped_area	NULL
diff --git a/mm/filemap.c b/mm/filemap.c
index 630a3eec5a881..1d5d5f1c2b541 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1932,6 +1932,8 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 		if (!mapping_large_folio_support(mapping))
 			order = 0;
+		if (order && !file_mthp_enabled())
+			order = 0;
 		if (order && mm_in_dynamic_pool(current->mm))
 			order = 0;
 		if (order > MAX_PAGECACHE_ORDER)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a62c4dc2b9da7..fcc881a5a4af9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -65,7 +65,8 @@ unsigned long transparent_hugepage_flags __read_mostly =
 #endif
 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
-	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)|
+	(1<<TRANSPARENT_HUGEPAGE_FILE_MTHP_FLAG);

 static struct shrinker deferred_split_shrinker;

@@ -489,6 +490,40 @@ static void thp_flag_set(enum transparent_hugepage_flag flag, bool enable)
 		clear_bit(flag, &transparent_hugepage_flags);
 }
+static ssize_t file_enabled_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buf)
+{
+	const char *output;
+
+	if (test_bit(TRANSPARENT_HUGEPAGE_FILE_MTHP_FLAG,
+		     &transparent_hugepage_flags))
+		output = "[always] never";
+	else
+		output = "always [never]";
+
+	return sysfs_emit(buf, "%s\n", output);
+}
+
+static ssize_t file_enabled_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buf, size_t count)
+{
+	ssize_t ret = count;
+
+	if (sysfs_streq(buf, "always")) {
+		set_bit(TRANSPARENT_HUGEPAGE_FILE_MTHP_FLAG,
+			&transparent_hugepage_flags);
+	} else if (sysfs_streq(buf, "never")) {
+		clear_bit(TRANSPARENT_HUGEPAGE_FILE_MTHP_FLAG,
+			  &transparent_hugepage_flags);
+	} else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static struct kobj_attribute file_enabled_attr = __ATTR_RW(file_enabled);
+
 static ssize_t thp_exec_enabled_show(struct kobject *kobj,
 				     struct kobj_attribute *attr, char *buf)
 {
@@ -592,6 +627,7 @@ static struct attribute *hugepage_attr[] = {
 #ifdef CONFIG_SHMEM
 	&shmem_enabled_attr.attr,
 #endif
+	&file_enabled_attr.attr,
 	&thp_exec_enabled_attr.attr,
 	&thp_mapping_align_attr.attr,
 	NULL,
diff --git a/mm/readahead.c b/mm/readahead.c
index d0b3de43cf23b..ab1c61f0c0360 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -504,6 +504,8 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	if (!mapping_large_folio_support(mapping) || ra->size < 4)
 		goto fallback;
+	if (!file_mthp_enabled())
+		goto fallback;
 	if (mm_in_dynamic_pool(current->mm))
 		goto fallback;
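As a quick sanity check, a minimal userspace sketch (not part of this
patch; the sysfs path and values are taken from the diff above, the
program itself is only illustrative) can flip the control and read back
its state:

/* toggle_file_mthp.c: hypothetical test helper, not part of the patch */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define CTL "/sys/kernel/mm/transparent_hugepage/file_enabled"

static int write_ctl(const char *val)
{
	int fd = open(CTL, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

static void show_ctl(void)
{
	char buf[64] = { 0 };
	int fd = open(CTL, O_RDONLY);

	if (fd < 0)
		return;
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("%s", buf);	/* e.g. "always [never]" */
	close(fd);
}

int main(void)
{
	show_ctl();			/* default: "[always] never" */
	if (write_ctl("never"))		/* disable file-backed mTHP */
		perror("write");
	show_ctl();
	return 0;
}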