From: Ding Tianhong <dingtianhong@huawei.com>
ascend inclusion
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------------
Commit 59a57a82fb2a ("mm/vmalloc: Hugepage vmalloc mappings") enables hugepage mappings for vmalloc by default whenever the allocation size is PMD_SIZE or larger. This behaves like transparent hugepages for mmap: the driver cannot control hugepage usage precisely and its logic may be broken by it. The share pool already exports the vmalloc_hugepage_xxx functions to control vmalloc hugepage allocation explicitly, which behaves like static hugepages for vmalloc, so disable the transparent hugepage behaviour.
This patch also fixes the KABI breakage of struct vm_struct introduced by that commit: the page_order member is removed again and the page order is derived from the VM flags instead, so the patch can be applied to the commercial version.
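To make the change easier to follow, here is a minimal stand-alone sketch (not part of the patch) of how the page order is now derived from the VM flags instead of the removed vm_struct::page_order field. The VM_HUGE_PAGES value and the shift constants below are placeholder values for a 4K-page configuration, not the kernel's definitions:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT    12          /* 4 KiB base pages (assumed) */
#define PMD_SHIFT     21          /* 2 MiB PMD mappings (assumed) */
#define VM_HUGE_PAGES 0x1000UL    /* hypothetical flag value */

static bool enable_ascend_share_pool = true;

/* Mirrors the structure of the is_vmalloc_huge() helper added below. */
static bool is_vmalloc_huge(unsigned long vm_flags)
{
	return enable_ascend_share_pool && (vm_flags & VM_HUGE_PAGES);
}

int main(void)
{
	unsigned long flags = VM_HUGE_PAGES;
	/* Same expression sp_free_pages()/__vunmap() now use for the order. */
	unsigned int order = is_vmalloc_huge(flags) ? PMD_SHIFT - PAGE_SHIFT : 0;

	printf("order=%u -> %lu base pages per huge mapping\n",
	       order, 1UL << order);
	return 0;
}

With 4K pages this prints order=9, i.e. 512 base pages (2M) per mapping step, which is why storing the order in struct vm_struct is unnecessary.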
Fixes: 59a57a82fb2a ("mm/vmalloc: Hugepage vmalloc mappings")
Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/share_pool.h | 51 ++++++++++++++++++++++++++++----------
 include/linux/vmalloc.h    |  1 -
 mm/vmalloc.c               | 47 ++++++++++++-----------------
 3 files changed, 54 insertions(+), 45 deletions(-)
diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index c3120b7b24948..4a18c88d5a10e 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -211,15 +211,6 @@ static inline void sp_area_work_around(struct vm_unmapped_area_info *info)

 extern struct page *sp_alloc_pages(struct vm_struct *area, gfp_t mask,
				    unsigned int page_order, int node);
-
-static inline void sp_free_pages(struct page *page, struct vm_struct *area)
-{
-	if (PageHuge(page))
-		put_page(page);
-	else
-		__free_pages(page, area->page_order);
-}
-
 static inline bool sp_check_vm_share_pool(unsigned long vm_flags)
 {
	if (enable_ascend_share_pool && (vm_flags & VM_SHARE_POOL))
@@ -264,6 +255,30 @@ extern void *buff_vzalloc_hugepage_user(unsigned long size);

 void sp_exit_mm(struct mm_struct *mm);

+static inline bool is_vmalloc_huge(unsigned long vm_flags)
+{
+	if (enable_ascend_share_pool && (vm_flags & VM_HUGE_PAGES))
+		return true;
+
+	return false;
+}
+
+static inline bool is_vmalloc_sharepool(unsigned long vm_flags)
+{
+	if (enable_ascend_share_pool && (vm_flags & VM_SHAREPOOL))
+		return true;
+
+	return false;
+}
+
+static inline void sp_free_pages(struct page *page, struct vm_struct *area)
+{
+	if (PageHuge(page))
+		put_page(page);
+	else
+		__free_pages(page, is_vmalloc_huge(area->flags) ? PMD_SHIFT - PAGE_SHIFT : 0);
+}
+
 #else

 static inline int sp_group_add_task(int pid, int spg_id)
@@ -400,10 +415,6 @@ static inline struct page *sp_alloc_pages(void *area, gfp_t mask,
	return NULL;
 }

-static inline void sp_free_pages(struct page *page, struct vm_struct *area)
-{
-}
-
 static inline bool sp_check_vm_share_pool(unsigned long vm_flags)
 {
	return false;
@@ -448,6 +459,20 @@ static inline void *buff_vzalloc_hugepage_user(unsigned long size)
	return NULL;
 }
+static inline bool is_vmalloc_huge(unsigned long vm_flags)
+{
+	return false;
+}
+
+static inline bool is_vmalloc_sharepool(unsigned long vm_flags)
+{
+	return false;
+}
+
+static inline void sp_free_pages(struct page *page, struct vm_struct *area)
+{
+}
+
 #endif
 #endif /* LINUX_SHARE_POOL_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index bb814f6418fd9..298eff5579b21 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -43,7 +43,6 @@ struct vm_struct {
	unsigned long size;
	unsigned long flags;
	struct page **pages;
-	unsigned int page_order;
	unsigned int nr_pages;
	phys_addr_t phys_addr;
	const void *caller;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 37b4762871142..8c70131e0b078 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2354,6 +2354,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 static void __vunmap(const void *addr, int deallocate_pages)
 {
	struct vm_struct *area;
+	unsigned int page_order = 0;

	if (!addr)
		return;
@@ -2369,13 +2370,14 @@ static void __vunmap(const void *addr, int deallocate_pages)
		return;
	}

-#ifdef CONFIG_ASCEND_SHARE_POOL
	/* unmap a sharepool vm area will cause meamleak! */
-	if (area->flags & VM_SHAREPOOL) {
+	if (is_vmalloc_sharepool(area->flags)) {
		WARN(1, KERN_ERR "Memory leak due to vfree() sharepool vm area (%p) !\n", addr);
		return;
	}
-#endif
+
+	if (is_vmalloc_huge(area->flags))
+		page_order = PMD_SHIFT - PAGE_SHIFT;

	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
@@ -2384,14 +2386,14 @@ static void __vunmap(const void *addr, int deallocate_pages)
	if (deallocate_pages) {
		int i;

-		for (i = 0; i < area->nr_pages; i += 1U << area->page_order) {
+		for (i = 0; i < area->nr_pages; i += 1U << page_order) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			if (sp_is_enabled())
				sp_free_pages(page, area);
			else
-				__free_pages(page, area->page_order);
+				__free_pages(page, page_order);
		}

		kvfree(area->pages);
@@ -2589,7 +2591,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,

	area->pages = pages;
	area->nr_pages = nr_pages;
-	area->page_order = page_order;

	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
		struct page *page;
@@ -2657,27 +2658,17 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;
-	if (vmap_allow_huge && (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))) {
-		unsigned long size_per_node;
-
+	if (vmap_allow_huge && (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) && is_vmalloc_huge(vm_flags)) {
		/*
-		 * Try huge pages. Only try for PAGE_KERNEL allocations,
-		 * others like modules don't yet expect huge pages in
-		 * their allocations due to apply_to_page_range not
-		 * supporting them.
+		 * Allocate huge pages. Only valid for PAGE_KERNEL allocations
+		 * with the VM_HUGE_PAGES flag set.
		 */
-		size_per_node = size;
-		if (node == NUMA_NO_NODE && !sp_is_enabled())
-			size_per_node /= num_online_nodes();
-		if (size_per_node >= PMD_SIZE) {
-			shift = PMD_SHIFT;
-			align = max(real_align, 1UL << shift);
-			size = ALIGN(real_size, 1UL << shift);
-		}
+		shift = PMD_SHIFT;
+		align = max(real_align, 1UL << shift);
+		size = ALIGN(real_size, 1UL << shift);
	}

-again:
	size = PAGE_ALIGN(size);
	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
@@ -2706,12 +2697,6 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
	return addr;

 fail:
-	if (shift > PAGE_SHIFT) {
-		shift = PAGE_SHIFT;
-		align = real_align;
-		size = real_size;
-		goto again;
-	}

	if (!area) {
		/* Warn for area allocation, page allocations already warn */
@@ -3776,7 +3761,7 @@ static int s_show(struct seq_file *m, void *p)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
-		seq_printf(m, " pages=%d order=%d", v->nr_pages, v->page_order);
+		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);
@@ -3796,8 +3781,8 @@ static int s_show(struct seq_file *m, void *p)
	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

-	if (sp_is_enabled())
-		seq_printf(m, " order=%d", v->page_order);
+	if (is_vmalloc_huge(v->flags))
+		seq_printf(m, " order=%d", PMD_SHIFT - PAGE_SHIFT);

	show_numa_info(m, v);
	seq_putc(m, '\n');