hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9H84X
CVE: NA
-------------------------------------------------
Let do_sync_mmap_readahead() try to enable THP for exec mappings, which can reduce iTLB misses for large applications with a big text segment.
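
A quick way to exercise the knob once the patch is applied (a sketch, assuming CONFIG_READ_ONLY_THP_FOR_FS=y; "my_big_app" stands in for any binary with a large text segment):

  # Enable THP for exec mappings on read fault (write 0x0 to disable).
  echo 0x1 > /sys/kernel/mm/transparent_hugepage/thp_exec_enabled

  # After restarting the workload, the FilePmdMapped counters should
  # grow if the text segment was mapped with PMD-sized pages.
  grep FilePmdMapped /proc/meminfo
  grep FilePmdMapped /proc/$(pidof my_big_app)/smaps_rollup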
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 Documentation/admin-guide/mm/transhuge.rst |  7 +++++++
 include/linux/huge_mm.h                    |  1 +
 mm/filemap.c                               | 27 +++++++++++++++++++++++
 mm/huge_memory.c                           | 29 +++++++++++++++++++++++++
 4 files changed, 64 insertions(+)
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 04eb45a2f940..936da10c5260 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -202,6 +202,13 @@ PMD-mappable transparent hugepage::
 
 	cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
 
+The kernel tries to use huge, PMD-mappable pages on read page faults
+for file exec mappings if CONFIG_READ_ONLY_THP_FOR_FS is enabled. The
+feature can be enabled by writing 1 or disabled by writing 0::
+
+	echo 0x0 >/sys/kernel/mm/transparent_hugepage/thp_exec_enabled
+	echo 0x1 >/sys/kernel/mm/transparent_hugepage/thp_exec_enabled
+
 khugepaged will be automatically started when one or more hugepage
 sizes are enabled (either by directly setting "always" or "madvise",
 or by setting "inherit" while the top-level enabled is set to "always"

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 5adb86af35fc..885888e38e26 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -50,6 +50,7 @@ enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
 	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
+	TRANSPARENT_HUGEPAGE_FILE_EXEC_THP_FLAG,
 };
 
 struct kobject;

diff --git a/mm/filemap.c b/mm/filemap.c
index 96a49748772c..e8a7bd88bf90 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -45,6 +45,7 @@
 #include <linux/migrate.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/splice.h>
+#include <linux/huge_mm.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -3113,6 +3114,29 @@ static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
 	return 1;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define file_exec_thp_enabled() \
+	(transparent_hugepage_flags & \
+	 (1<<TRANSPARENT_HUGEPAGE_FILE_EXEC_THP_FLAG))
+
+static inline void try_enable_file_exec_thp(struct vm_area_struct *vma,
+					    unsigned long *vm_flags,
+					    struct file *file)
+{
+	if (!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
+		return;
+
+	if (!is_exec_mapping(*vm_flags))
+		return;
+
+	if (file->f_op->get_unmapped_area != thp_get_unmapped_area)
+		return;
+
+	if (file_exec_thp_enabled())
+		hugepage_madvise(vma, vm_flags, MADV_HUGEPAGE);
+}
+#endif
+
 /*
  * Synchronous readahead happens when we don't even find a page in the page
  * cache at all. We don't want to perform IO under the mmap sem, so if we have
@@ -3131,6 +3155,9 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	unsigned int mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	/* Try to enable THP for exec mappings by default */
+	try_enable_file_exec_thp(vmf->vma, &vm_flags, file);
+
 	/* Use the readahead code, even if readahead is disabled */
 	if (vm_flags & VM_HUGEPAGE) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 87edef93df10..6c277b55544c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -425,6 +425,32 @@ static ssize_t hpage_pmd_size_show(struct kobject *kobj,
 static struct kobj_attribute hpage_pmd_size_attr =
 	__ATTR_RO(hpage_pmd_size);
 
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+static ssize_t thp_exec_enabled_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *buf)
+{
+	return single_hugepage_flag_show(kobj, attr, buf,
+			TRANSPARENT_HUGEPAGE_FILE_EXEC_THP_FLAG);
+}
+static ssize_t thp_exec_enabled_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret = single_hugepage_flag_store(kobj, attr, buf, count,
+				TRANSPARENT_HUGEPAGE_FILE_EXEC_THP_FLAG);
+	if (ret > 0) {
+		int err = start_stop_khugepaged();
+
+		if (err)
+			ret = err;
+	}
+
+	return ret;
+}
+static struct kobj_attribute thp_exec_enabled_attr =
+	__ATTR_RW(thp_exec_enabled);
+
+#endif
+
 static struct attribute *hugepage_attr[] = {
 	&enabled_attr.attr,
 	&defrag_attr.attr,
@@ -432,6 +458,9 @@ static struct attribute *hugepage_attr[] = {
 	&hpage_pmd_size_attr.attr,
 #ifdef CONFIG_SHMEM
 	&shmem_enabled_attr.attr,
+#endif
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	&thp_exec_enabled_attr.attr,
 #endif
 	NULL,
 };
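
For completeness, a sketch of the new attribute's userspace behaviour (assuming single_hugepage_flag_show() prints 0 or 1, as it does for the existing use_zero_page knob):

  # Read back the current state of
  # TRANSPARENT_HUGEPAGE_FILE_EXEC_THP_FLAG.
  cat /sys/kernel/mm/transparent_hugepage/thp_exec_enabled

  # A successful write also calls start_stop_khugepaged(), so the
  # daemon should be running afterwards.
  echo 0x1 > /sys/kernel/mm/transparent_hugepage/thp_exec_enabled
  pgrep khugepaged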