hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IAF8L3
--------------------------------
It's not appropriate to use the current task to check THP for a vma, because the vma does not belong to the current task when called from process_madvise() or damos_madvise(). Use vma->vm_mm instead of the current task.
For shmem, if called from file operations, there is no vma. In such a case, using current->mm is enough if pages need to be allocated.
Fixes: 35e812f6fd13 ("mm/dynamic_pool: disable THP for task attached with dpool") Signed-off-by: Liu Shixin liushixin2@huawei.com --- include/linux/dynamic_pool.h | 8 ++++---- mm/dynamic_pool.c | 19 +++++++++++++++++-- mm/khugepaged.c | 4 ++-- mm/memory.c | 4 ++-- mm/shmem.c | 2 +- 5 files changed, 26 insertions(+), 11 deletions(-)
diff --git a/include/linux/dynamic_pool.h b/include/linux/dynamic_pool.h index 1d41c6a853c3..3b54c384ec5a 100644 --- a/include/linux/dynamic_pool.h +++ b/include/linux/dynamic_pool.h @@ -77,13 +77,13 @@ struct dpool_info { struct range pfn_ranges[]; };
-bool __task_in_dynamic_pool(struct task_struct *tsk); -static inline bool task_in_dynamic_pool(struct task_struct *tsk) +bool __mm_in_dynamic_pool(struct mm_struct *mm); +static inline bool mm_in_dynamic_pool(struct mm_struct *mm) { if (!dpool_enabled) return false;
- return __task_in_dynamic_pool(tsk); + return __mm_in_dynamic_pool(mm); }
static inline bool page_from_dynamic_pool(struct page *page) @@ -140,7 +140,7 @@ static inline bool page_from_dynamic_pool(struct page *page) return false; }
-static inline bool task_in_dynamic_pool(struct task_struct *tsk) +static inline bool mm_in_dynamic_pool(struct mm_struct *mm) { return false; } diff --git a/mm/dynamic_pool.c b/mm/dynamic_pool.c index 2d41bb61ceb6..7b0bbf0c348f 100644 --- a/mm/dynamic_pool.c +++ b/mm/dynamic_pool.c @@ -137,14 +137,29 @@ static struct dynamic_pool *dpool_get_from_page(struct page *page) return dpool; }
-bool __task_in_dynamic_pool(struct task_struct *tsk) +static struct dynamic_pool *dpool_get_from_mm(struct mm_struct *mm) +{ + struct dynamic_pool *dpool = NULL; + struct mem_cgroup *memcg; + + memcg = get_mem_cgroup_from_mm(mm); + if (!memcg) + return NULL; + + dpool = dpool_get_from_memcg(memcg); + css_put(&memcg->css); + + return dpool; +} + +bool __mm_in_dynamic_pool(struct mm_struct *mm) { struct dynamic_pool *dpool;
if (!dpool_enabled) return false;
- dpool = dpool_get_from_task(tsk); + dpool = dpool_get_from_mm(mm); dpool_put(dpool);
return !!dpool; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 06f31ad4452e..bc1aaf5b99ed 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -363,7 +363,7 @@ int hugepage_madvise(struct vm_area_struct *vma, return 0; #endif
- if (task_in_dynamic_pool(current)) + if (mm_in_dynamic_pool(vma->vm_mm)) return -EINVAL;
*vm_flags &= ~VM_NOHUGEPAGE; @@ -2743,7 +2743,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER)) return -EINVAL;
- if (task_in_dynamic_pool(current)) + if (mm_in_dynamic_pool(mm)) return -EINVAL;
cc = kmalloc(sizeof(*cc), GFP_KERNEL); diff --git a/mm/memory.c b/mm/memory.c index 49a5618661d8..63493bef46e9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5455,7 +5455,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, if (pud_none(*vmf.pud) && thp_vma_allowable_order(vma, vm_flags, TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER) && - !task_in_dynamic_pool(current)) { + !mm_in_dynamic_pool(mm)) { ret = create_huge_pud(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; @@ -5491,7 +5491,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, if (pmd_none(*vmf.pmd) && thp_vma_allowable_order(vma, vm_flags, TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER) && - !task_in_dynamic_pool(current)) { + !mm_in_dynamic_pool(mm)) { ret = create_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; diff --git a/mm/shmem.c b/mm/shmem.c index 079f47192bdb..af01e8d283f4 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2031,7 +2031,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, if (!shmem_is_huge(inode, index, false, vma ? vma->vm_mm : NULL, vma ? vma->vm_flags : 0)) goto alloc_nohuge; - if (task_in_dynamic_pool(current)) + if (mm_in_dynamic_pool(vma ? vma->vm_mm : current->mm)) goto alloc_nohuge;
huge_gfp = vma_thp_gfp_mask(vma);