Offering: HULK
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6HRGK
----------------------------------------
Use CONFIG_EXTEND_HUGEPAGE_MAPPING to isolate the code introduced in commit a3425d4173784e41644abc3cc70d41aa2aef16fd.
Also, use tabs instead of spaces to match the existing Kconfig formatting.
Signed-off-by: Zhang Zekun <zhangzekun11@huawei.com>
---
 arch/arm64/Kconfig                     |  2 +-
 arch/arm64/configs/openeuler_defconfig |  1 +
 include/linux/mm.h                     |  2 ++
 include/linux/vmalloc.h                |  7 +++++++
 mm/Kconfig                             |  6 ++++++
 mm/vmalloc.c                           | 16 ++++++++++++++++
 6 files changed, 33 insertions(+), 1 deletion(-)
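For illustration only, not part of the patch: a minimal sketch of how a caller
might use the hugepage-only allocation interface guarded by the new option,
falling back to a regular vmalloc() when it is not compiled in. The function
name alloc_big_buffer() and the fallback policy are assumptions, not taken
from this series.

#include <linux/vmalloc.h>

/*
 * Hypothetical caller: try a hugepage-backed allocation when
 * CONFIG_EXTEND_HUGEPAGE_MAPPING is enabled, otherwise (or on
 * allocation failure) use a regular vmalloc() allocation.
 */
static void *alloc_big_buffer(unsigned long size)
{
#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
	void *buf = vmalloc_hugepage(size);

	if (buf)
		return buf;
#endif
	return vmalloc(size);
}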
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a0bba8e5426a..e543b74f1fab 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2078,7 +2078,7 @@ config ASCEND_CHARGE_MIGRATE_HUGEPAGES
 config ASCEND_SHARE_POOL
 	bool "Enable support for the Share Pool Memory"
 	default n
-	depends on HAVE_ARCH_HUGE_VMALLOC
+	depends on HAVE_ARCH_HUGE_VMALLOC && EXTEND_HUGEPAGE_MAPPING
 	select ARCH_USES_HIGH_VMA_FLAGS
 	help
 	  This feature allows multiple processes to share virtual memory both
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index afe1060947e2..3f1532646775 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -1131,6 +1131,7 @@ CONFIG_PIN_MEMORY=y
 CONFIG_PID_RESERVE=y
 CONFIG_MEMORY_RELIABLE=y
 # CONFIG_CLEAR_FREELIST_PAGE is not set
+CONFIG_EXTEND_HUGEPAGE_MAPPING=y

 #
 # Data Access Monitoring
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 430eb04ca390..0b5ce84212d7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -235,11 +235,13 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
 #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /* to align the pointer to the (next) PMD hugepage boundary */
 #define PMD_ALIGN(addr)	ALIGN(addr, PMD_SIZE)

 /* test whether an address (unsigned long or pointer) is aligned to PMD_SIZE */
 #define PMD_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PMD_SIZE)
+#endif

 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 49c94afce25b..1ebe364ed29a 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -27,7 +27,9 @@ struct notifier_block;		/* in notifier.h */
 #define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
 #define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
 #define VM_NO_HUGE_VMAP		0x00000400	/* force PAGE_SIZE pte mapping */
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 #define VM_HUGE_PAGES		0x00001000	/* used for vmalloc hugepages */
+#endif
 #ifdef CONFIG_ASCEND_SHARE_POOL
 #define VM_SHAREPOOL		0x00002000	/* remapped to sharepool */
 #else
@@ -142,8 +144,11 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller);
 void *vmalloc_no_huge(unsigned long size);
+
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 extern void *vmalloc_hugepage(unsigned long size);
 extern void *vmalloc_hugepage_user(unsigned long size);
+#endif

 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
@@ -160,6 +165,7 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 						unsigned long pgoff);

+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 extern void *vmap_hugepage(struct page **pages, unsigned int count,
 			   unsigned long flags, pgprot_t prot);
 extern int remap_vmalloc_hugepage_range_partial(struct vm_area_struct *vma,
@@ -167,6 +173,7 @@ extern int remap_vmalloc_hugepage_range_partial(struct vm_area_struct *vma,
 			unsigned long pgoff, unsigned long size);
 extern int remap_vmalloc_hugepage_range(struct vm_area_struct *vma,
 					void *addr, unsigned long pgoff);
+#endif

 /*
  * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
diff --git a/mm/Kconfig b/mm/Kconfig
index be7fd4ed2c4f..f66457168de9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -992,6 +992,12 @@ config CLEAR_FREELIST_PAGE
 	  To enable this feature, kernel parameter "clear_freelist" also
 	  needs to be added.

+config EXTEND_HUGEPAGE_MAPPING
+	bool "Extend for hugepages mapping"
+	depends on ARM64
+	default n
+	help
+	  Introduce vmalloc/vmap/remap interfaces that handle only hugepages.
source "mm/damon/Kconfig"
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d7a68eb0db42..e27cd716ca95 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -578,6 +578,7 @@ static int vmap_pages_range(unsigned long addr, unsigned long end,
 	return err;
 }

+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 static int vmap_hugepages_range_noflush(unsigned long addr, unsigned long end,
 			pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
@@ -609,6 +610,7 @@ static int vmap_hugepages_range(unsigned long addr, unsigned long end,

 	return err;
 }
+#endif

 /**
  * map_kernel_range_noflush - map kernel VM area with the specified pages
@@ -2792,6 +2794,7 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);

+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /**
  * vmap_hugepage - map an array of huge pages into virtually contiguous space
  * @pages: array of huge page pointers (only the header)
@@ -2830,6 +2833,7 @@ void *vmap_hugepage(struct page **pages, unsigned int count,
 	return area->addr;
 }
 EXPORT_SYMBOL(vmap_hugepage);
+#endif

 #ifdef CONFIG_VMAP_PFN
 struct vmap_pfn_data {
@@ -3015,7 +3019,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		size_per_node = size;
 		if (node == NUMA_NO_NODE)
 			size_per_node /= num_online_nodes();
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 		if (size_per_node >= PMD_SIZE || vm_flags & VM_HUGE_PAGES) {
+#else
+		if (size_per_node >= PMD_SIZE) {
+#endif
 			shift = PMD_SHIFT;
 			align = max(real_align, 1UL << shift);
 			size = ALIGN(real_size, 1UL << shift);
@@ -3050,8 +3058,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	return addr;

 fail:
+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 	/* User could specify VM_HUGE_PAGES to alloc only hugepages. */
 	if (shift > PAGE_SHIFT && !(vm_flags & VM_HUGE_PAGES)) {
+#else
+	if (shift > PAGE_SHIFT) {
+#endif
 		shift = PAGE_SHIFT;
 		align = real_align;
 		size = real_size;
@@ -3261,6 +3273,7 @@ void *vmalloc_32_user(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32_user);

+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /**
  * vmalloc_hugepage - allocate virtually contiguous hugetlb memory
  * @size: allocation size
@@ -3298,6 +3311,7 @@ void *vmalloc_hugepage_user(unsigned long size)
 			__builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_hugepage_user);
+#endif

 /*
  * small helper routine , copy contents to buf from addr.
@@ -3620,6 +3634,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 }
 EXPORT_SYMBOL(remap_vmalloc_range);

+#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
 /**
  * remap_vmalloc_hugepage_range_partial - map vmalloc hugepages
  *	to userspace
@@ -3706,6 +3721,7 @@ int remap_vmalloc_hugepage_range(struct vm_area_struct *vma, void *addr,
 					   vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL(remap_vmalloc_hugepage_range);
+#endif

 void free_vm_area(struct vm_struct *area)
 {
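
For illustration only, not part of the patch: a minimal sketch of how a driver
might expose a hugepage-backed vmalloc buffer to userspace through the remap
interface guarded by the new option. The buffer demo_buf and the mmap handler
demo_mmap() are hypothetical names made up for this sketch; demo_buf is assumed
to have been allocated with vmalloc_hugepage_user() elsewhere.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *demo_buf;	/* assumed: allocated with vmalloc_hugepage_user() */

/* Hypothetical mmap handler: map the hugepage-backed buffer to userspace. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_EXTEND_HUGEPAGE_MAPPING
	return remap_vmalloc_hugepage_range(vma, demo_buf, vma->vm_pgoff);
#else
	return remap_vmalloc_range(vma, demo_buf, vma->vm_pgoff);
#endif
}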