hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8S9BY
CVE: NA
--------------------------------
There is currently no way to allocate THP pages from a dpool, so THP must be disabled for tasks attached to a dpool; otherwise such a task may allocate THP pages from the buddy system instead, which is not desired.
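To make the intended behaviour concrete, every THP entry point is gated by the pattern sketched below. This is illustrative only: the helper thp_allowed_for() does not exist in the patch, which open-codes the two checks at each site (fault path, khugepaged, madvise_collapse, shmem).

    /*
     * Illustrative sketch, not part of the diff: the patch
     * open-codes this check at each THP entry point.
     */
    static bool thp_allowed_for(struct task_struct *tsk,
                                struct vm_area_struct *vma,
                                unsigned long vm_flags)
    {
            /* Existing THP eligibility test for the VMA. */
            if (!hugepage_vma_check(vma, vm_flags, false, true, true))
                    return false;

            /*
             * New: a task attached to a dpool must not take THP
             * from the buddy system; fall back to base pages.
             */
            return !task_in_dynamic_pool(tsk);
    }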
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 include/linux/dynamic_pool.h | 14 ++++++++++++++
 mm/dynamic_pool.c            | 13 +++++++++++++
 mm/khugepaged.c              | 18 ++++++++++++++++++
 mm/memory.c                  |  7 +++++--
 mm/shmem.c                   |  3 +++
 5 files changed, 53 insertions(+), 2 deletions(-)
diff --git a/include/linux/dynamic_pool.h b/include/linux/dynamic_pool.h
index 34eec9267fcd..6a4bc25d22a8 100644
--- a/include/linux/dynamic_pool.h
+++ b/include/linux/dynamic_pool.h
@@ -74,6 +74,15 @@ struct dpool_info {
         struct range pfn_ranges[0];
 };
 
+bool __task_in_dynamic_pool(struct task_struct *tsk);
+static inline bool task_in_dynamic_pool(struct task_struct *tsk)
+{
+        if (!dpool_enabled)
+                return false;
+
+        return __task_in_dynamic_pool(tsk);
+}
+
 static inline bool page_from_dynamic_pool(struct page *page)
 {
         if (!dpool_enabled)
@@ -126,6 +135,11 @@ static inline bool page_from_dynamic_pool(struct page *page)
         return false;
 }
 
+static inline bool task_in_dynamic_pool(struct task_struct *tsk)
+{
+        return false;
+}
+
 static inline int dynamic_pool_can_attach(struct task_struct *tsk,
                                           struct mem_cgroup *memcg)
 {
diff --git a/mm/dynamic_pool.c b/mm/dynamic_pool.c
index c63089008f3c..1844888e8648 100644
--- a/mm/dynamic_pool.c
+++ b/mm/dynamic_pool.c
@@ -136,6 +136,19 @@ static struct dynamic_pool *dpool_get_from_page(struct page *page)
         return dpool;
 }
 
+bool __task_in_dynamic_pool(struct task_struct *tsk)
+{
+        struct dynamic_pool *dpool;
+
+        if (!dpool_enabled)
+                return false;
+
+        dpool = dpool_get_from_task(tsk);
+        dpool_put(dpool);
+
+        return !!dpool;
+}
+
 /* === demote and promote function ==================================== */
 
 static void dpool_disable_pcp_pool(struct dynamic_pool *dpool, bool drain);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6f2787d3b682..50babcbf11ce 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -20,6 +20,7 @@
 #include <linux/swapops.h>
 #include <linux/shmem_fs.h>
 #include <linux/ksm.h>
+#include <linux/dynamic_pool.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -361,6 +362,10 @@ int hugepage_madvise(struct vm_area_struct *vma,
         if (mm_has_pgste(vma->vm_mm))
                 return 0;
 #endif
+
+        if (task_in_dynamic_pool(current))
+                return -EINVAL;
+
         *vm_flags &= ~VM_NOHUGEPAGE;
         *vm_flags |= VM_HUGEPAGE;
         /*
@@ -1369,6 +1374,11 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                         goto out_unmap;
                 }
 
+                if (page_from_dynamic_pool(page)) {
+                        result = SCAN_FAIL;
+                        goto out_unmap;
+                }
+
                 /*
                  * Check if the page has any GUP (or other external) pins.
                  *
@@ -2296,6 +2306,11 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
                         break;
                 }
 
+                if (page_from_dynamic_pool(page)) {
+                        result = SCAN_FAIL;
+                        break;
+                }
+
                 /*
                  * We probably should check if the page is referenced here, but
                  * nobody would transfer pte_young() to PageReferenced() for us.
@@ -2726,6 +2741,9 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
         if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
                 return -EINVAL;
 
+        if (task_in_dynamic_pool(current))
+                return -EINVAL;
+
         cc = kmalloc(sizeof(*cc), GFP_KERNEL);
         if (!cc)
                 return -ENOMEM;
diff --git a/mm/memory.c b/mm/memory.c
index 944c2ce2756b..22e0150acec4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -78,6 +78,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sched/sysctl.h>
 #include <linux/userswap.h>
+#include <linux/dynamic_pool.h>
 
 #include <trace/events/kmem.h>
 
@@ -5107,7 +5108,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
                 return VM_FAULT_OOM;
 retry_pud:
         if (pud_none(*vmf.pud) &&
-            hugepage_vma_check(vma, vm_flags, false, true, true)) {
+            hugepage_vma_check(vma, vm_flags, false, true, true) &&
+            !task_in_dynamic_pool(current)) {
                 ret = create_huge_pud(&vmf);
                 if (!(ret & VM_FAULT_FALLBACK))
                         return ret;
@@ -5141,7 +5143,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
                 goto retry_pud;
 
         if (pmd_none(*vmf.pmd) &&
-            hugepage_vma_check(vma, vm_flags, false, true, true)) {
+            hugepage_vma_check(vma, vm_flags, false, true, true) &&
+            !task_in_dynamic_pool(current)) {
                 ret = create_huge_pmd(&vmf);
                 if (!(ret & VM_FAULT_FALLBACK))
                         return ret;
diff --git a/mm/shmem.c b/mm/shmem.c
index b44bfad90f8d..de9a884b10e8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -79,6 +79,7 @@ static struct vfsmount *shm_mnt;
 #include <linux/rmap.h>
 #include <linux/uuid.h>
 #include <linux/quotaops.h>
+#include <linux/dynamic_pool.h>
 
 #include <linux/uaccess.h>
 
@@ -2019,6 +2020,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
         if (!shmem_is_huge(inode, index, false,
                            vma ? vma->vm_mm : NULL, vma ? vma->vm_flags : 0))
                 goto alloc_nohuge;
+        if (task_in_dynamic_pool(current))
+                goto alloc_nohuge;
 
         huge_gfp = vma_thp_gfp_mask(vma);
         huge_gfp = limit_gfp_mask(huge_gfp, gfp);
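Note: with this patch, hugepage_madvise() returns -EINVAL for a task attached to a dpool, so MADV_HUGEPAGE is visibly rejected from userspace. A minimal check might look like the sketch below (illustrative test only, not part of the patch; it assumes the calling task has already been attached to a dpool):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2 * 1024 * 1024;   /* one PMD-sized region */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            /*
             * For a task attached to a dpool, hugepage_madvise() now
             * fails, so madvise() returns -1 with errno == EINVAL.
             */
            if (madvise(p, len, MADV_HUGEPAGE) && errno == EINVAL)
                    printf("THP disabled for this task (expected in dpool)\n");
            else
                    printf("MADV_HUGEPAGE accepted\n");

            munmap(p, len);
            return 0;
    }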