hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID65ME ---------------------------------------- Add zero_page_full_cow sysfs knob for zero hugepage, and the value is '0' by default. When it's enabled, kernel will try to allocate PMD page for PMD-mappable zero anonymous mapping on WP mode. Signed-off-by: Zhang Qilong <zhangqilong3@huawei.com> --- Documentation/admin-guide/mm/transhuge.rst | 7 +++++++ include/linux/huge_mm.h | 1 + mm/huge_memory.c | 21 +++++++++++++++++++++ 3 files changed, 29 insertions(+) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 70176fc3f94c..55f4ede1f8f8 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -201,10 +201,17 @@ page fault to anonymous mapping. It's possible to disable huge zero page by writing 0 or enable it back by writing 1:: echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page +By default kernel disable zero page full CoW. When it's enabled, kernel +will try to allocate PMD page for PMD-mappable zero anonymous mapping on +WP mode. 
We can disable it by writing 0 or enable it back by writing 1:: + + echo 0 >/sys/kernel/mm/transparent_hugepage/zero_page_full_cow + echo 1 >/sys/kernel/mm/transparent_hugepage/zero_page_full_cow + Some userspace (such as a test program, or an optimized memory allocation library) may want to know the size (in bytes) of a PMD-mappable transparent hugepage:: cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index cfe42c43b55b..47a6634e74c4 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -55,10 +55,11 @@ enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FILE_EXEC_THP_FLAG, TRANSPARENT_HUGEPAGE_FILE_EXEC_MTHP_FLAG, TRANSPARENT_HUGEPAGE_FILE_MAPPING_ALIGN_FLAG, TRANSPARENT_HUGEPAGE_ANON_MAPPING_ALIGN_FLAG, TRANSPARENT_HUGEPAGE_ANON_MAPPING_PMD_ALIGN_FLAG, + TRANSPARENT_HUGEPAGE_ZERO_PAGE_FULL_COW_FLAG, }; struct kobject; struct kobj_attribute; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a28dda799978..11162cf63f44 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -601,10 +601,26 @@ static ssize_t thp_mapping_align_store(struct kobject *kobj, return count; } static struct kobj_attribute thp_mapping_align_attr = __ATTR_RW(thp_mapping_align); +static ssize_t zero_page_full_cow_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return single_hugepage_flag_show(kobj, attr, buf, + TRANSPARENT_HUGEPAGE_ZERO_PAGE_FULL_COW_FLAG); +} +static ssize_t zero_page_full_cow_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + return single_hugepage_flag_store(kobj, attr, buf, count, + TRANSPARENT_HUGEPAGE_ZERO_PAGE_FULL_COW_FLAG); +} + +static struct kobj_attribute zero_page_full_cow_attr = + __ATTR_RW(zero_page_full_cow); + static struct attribute *hugepage_attr[] = { &enabled_attr.attr, &defrag_attr.attr, &use_zero_page_attr.attr, &pcp_allow_high_order_attr.attr, @@ -613,10 +629,11 @@ static struct attribute 
*hugepage_attr[] = { &shmem_enabled_attr.attr, #endif &file_enabled_attr.attr, &thp_exec_enabled_attr.attr, &thp_mapping_align_attr.attr, + &zero_page_full_cow_attr.attr, NULL, }; static const struct attribute_group hugepage_attr_group = { .attrs = hugepage_attr, @@ -2012,10 +2029,14 @@ static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; struct mmu_notifier_range range; struct folio *folio; vm_fault_t ret = 0; + if (!test_bit(TRANSPARENT_HUGEPAGE_ZERO_PAGE_FULL_COW_FLAG, + &transparent_hugepage_flags)) + return VM_FAULT_FALLBACK; + folio = vma_alloc_anon_folio_pmd(vma, vmf->address); if (unlikely(!folio)) return VM_FAULT_FALLBACK; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr, -- 2.43.0