hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8S9BY
CVE: NA
--------------------------------
Add functions to allocate pages from the dpool. Processes in the
corresponding memcg will allocate pages from the dpool first. Once the
dpool is exhausted, allocation falls back to the buddy system. A sketch
of the intended page lifecycle follows the diffstat below.
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 include/linux/dynamic_pool.h |  28 ++++++++
 mm/dynamic_pool.c            | 128 +++++++++++++++++++++++++++++++++++
 mm/internal.h                |   1 +
 mm/page_alloc.c              |  22 +++++-
 4 files changed, 178 insertions(+), 1 deletion(-)
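
Note for reviewers (not part of the commit): a minimal sketch of the
intended page lifecycle, assuming a task attached to a memcg that has a
dpool and the PagePool page flag introduced earlier in this series:

	struct page *page;

	/*
	 * Order-0 user allocation: __alloc_pages() now consults
	 * dynamic_pool_alloc_page() before trying the buddy freelists.
	 */
	page = alloc_page(GFP_HIGHUSER_MOVABLE);

	/* Pages handed out by the dpool carry the PagePool flag. */
	if (page && page_from_dynamic_pool(page))
		pr_info("page came from the dpool 4K pages pool\n");

	/*
	 * On free, free_unref_page() routes PagePool pages back to the
	 * dpool freelist instead of the pcp lists / buddy system.
	 */
	__free_page(page);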
diff --git a/include/linux/dynamic_pool.h b/include/linux/dynamic_pool.h
index d32bd797f991..bb6f3e178881 100644
--- a/include/linux/dynamic_pool.h
+++ b/include/linux/dynamic_pool.h
@@ -54,7 +54,18 @@ struct dynamic_pool {
 	unsigned long total_pages;
 };
 
+static inline bool page_from_dynamic_pool(struct page *page)
+{
+	if (!dpool_enabled)
+		return false;
+
+	return PagePool(page);
+}
+
 int dynamic_pool_can_attach(struct task_struct *tsk, struct mem_cgroup *memcg);
+struct page *dynamic_pool_alloc_page(gfp_t gfp, unsigned int order,
+				     unsigned int alloc_flags);
+void dynamic_pool_free_page(struct page *page);
 void dynamic_pool_inherit(struct mem_cgroup *parent, struct mem_cgroup *memcg);
 int dynamic_pool_destroy(struct cgroup *cgrp, bool *clear_css_online);
@@ -67,14 +78,31 @@ int dynamic_pool_reserve_hugepage(struct mem_cgroup *memcg,
 				  unsigned long nr_pages, int type);
 #else
+#define dpool_enabled	0
+
 struct dynamic_pool {};
 
+static inline bool page_from_dynamic_pool(struct page *page)
+{
+	return false;
+}
+
 static inline int dynamic_pool_can_attach(struct task_struct *tsk,
 					  struct mem_cgroup *memcg)
 {
 	return 0;
 }
+static inline struct page *dynamic_pool_alloc_page(gfp_t gfp, unsigned int order,
+						   unsigned int alloc_flags)
+{
+	return NULL;
+}
+
+static inline void dynamic_pool_free_page(struct page *page)
+{
+}
+
 static inline void dynamic_pool_inherit(struct mem_cgroup *parent,
 					struct mem_cgroup *memcg)
 {
diff --git a/mm/dynamic_pool.c b/mm/dynamic_pool.c
index 7179e0834e04..6540dab89894 100644
--- a/mm/dynamic_pool.c
+++ b/mm/dynamic_pool.c
@@ -95,6 +95,25 @@ static struct dynamic_pool *dpool_get_from_task(struct task_struct *tsk)
 	return dpool;
 }
 
+static struct dynamic_pool *dpool_get_from_page(struct page *page)
+{
+	struct dynamic_pool *dpool = NULL;
+	unsigned long idx;
+
+	rcu_read_lock();
+	idx = hugepage_index(page_to_pfn(page));
+	read_lock(&dpool_page_array_rwlock);
+	if (idx < dpool_page_array->count)
+		dpool = dpool_page_array->dpool[idx];
+	read_unlock(&dpool_page_array_rwlock);
+
+	if (!dpool_get_unless_zero(dpool))
+		dpool = NULL;
+	rcu_read_unlock();
+
+	return dpool;
+}
+
 /* === demote and promote function ==================================== */
 
 /*
@@ -392,6 +411,115 @@ int dynamic_pool_can_attach(struct task_struct *tsk, struct mem_cgroup *memcg)
 	return ret;
 }
 
+static bool dpool_should_alloc(gfp_t gfp_mask, unsigned int order)
+{
+	gfp_t gfp = gfp_mask & GFP_HIGHUSER_MOVABLE;
+
+	if (current->flags & PF_KTHREAD)
+		return false;
+
+	if (order != 0)
+		return false;
+
+	/*
+	 * The cgroup only charges anonymous and file pages from userspace.
+	 * Some filesystems may have masked out __GFP_IO | __GFP_FS
+	 * to avoid recursive memory requests, e.g. loop device, xfs.
+	 */
+	if ((gfp | __GFP_IO | __GFP_FS) != GFP_HIGHUSER_MOVABLE)
+		return false;
+
+	return true;
+}
+
+struct page *dynamic_pool_alloc_page(gfp_t gfp, unsigned int order,
+				     unsigned int alloc_flags)
+{
+	struct dynamic_pool *dpool;
+	struct pages_pool *pool;
+	struct page *page = NULL;
+	unsigned long flags;
+
+	if (!dpool_enabled)
+		return NULL;
+
+	if (!dpool_should_alloc(gfp, order))
+		return NULL;
+
+	dpool = dpool_get_from_task(current);
+	if (!dpool)
+		return NULL;
+
+	pool = &dpool->pool[PAGES_POOL_4K];
+	spin_lock_irqsave(&dpool->lock, flags);
+	if (!dpool->online)
+		goto unlock;
+
+retry:
+	page = NULL;
+	if (!pool->free_pages && dpool_demote_pool_locked(dpool, PAGES_POOL_2M))
+		goto unlock;
+
+	page = list_first_entry_or_null(&pool->freelist, struct page, lru);
+	if (!page)
+		goto unlock;
+
+	__ClearPageDpool(page);
+	list_del(&page->lru);
+	pool->free_pages--;
+	pool->used_pages++;
+
+	if (check_new_page(page)) {
+		/* This is a bad page, treat it as a used page */
+		SetPagePool(page);
+		goto retry;
+	}
+
+	SetPagePool(page);
+
+unlock:
+	spin_unlock_irqrestore(&dpool->lock, flags);
+	dpool_put(dpool);
+	if (page)
+		prep_new_page(page, order, gfp, alloc_flags);
+
+	return page;
+}
+
+void dynamic_pool_free_page(struct page *page)
+{
+	struct dynamic_pool *dpool;
+	struct pages_pool *pool;
+	unsigned long flags;
+
+	if (!dpool_enabled)
+		return;
+
+	dpool = dpool_get_from_page(page);
+	if (!dpool) {
+		pr_err("get dpool failed when free page 0x%px\n", page);
+		return;
+	}
+
+	pool = &dpool->pool[PAGES_POOL_4K];
+	spin_lock_irqsave(&dpool->lock, flags);
+
+	ClearPagePool(page);
+	if (!free_pages_prepare(page, 0, 0)) {
+		SetPagePool(page);
+		goto unlock;
+	}
+
+	__SetPageDpool(page);
+	list_add(&page->lru, &pool->freelist);
+	pool->free_pages++;
+	pool->used_pages--;
+
+unlock:
+	spin_unlock_irqrestore(&dpool->lock, flags);
+	dpool_put(dpool);
+}
+
 /* === dynamic pool function ========================================== */
 static void dpool_dump_child_memcg(struct mem_cgroup *memcg, void *message)
diff --git a/mm/internal.h b/mm/internal.h
index a0f252baa67f..f4416fcbae78 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -441,6 +441,7 @@ extern void post_alloc_hook(struct page *page, unsigned int order,
 					gfp_t gfp_flags);
 extern bool free_pages_prepare(struct page *page, unsigned int order,
 			       fpi_t fpi_flags);
+extern int check_new_page(struct page *page);
 extern void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 			  unsigned int alloc_flags);
 extern int user_min_free_kbytes;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 04f4e5fc7d69..da0ac870a3a9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -53,6 +53,7 @@
 #include <linux/khugepaged.h>
 #include <linux/delayacct.h>
 #include <linux/cacheinfo.h>
+#include <linux/dynamic_pool.h>
 #include <asm/div64.h>
 #include "internal.h"
 #include "shuffle.h"
@@ -1426,7 +1427,7 @@ static void check_new_page_bad(struct page *page)
 /*
  * This page is about to be returned from the page allocator
  */
-static int check_new_page(struct page *page)
+int check_new_page(struct page *page)
 {
 	if (likely(page_expected_state(page,
 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
@@ -2484,6 +2485,11 @@ void free_unref_page(struct page *page, unsigned int order)
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype, pcpmigratetype;
 
+	if (page_from_dynamic_pool(page)) {
+		dynamic_pool_free_page(page);
+		return;
+	}
+
 	if (!free_unref_page_prepare(page, pfn, order))
 		return;
@@ -2530,6 +2536,13 @@ void free_unref_page_list(struct list_head *list)
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
 		unsigned long pfn = page_to_pfn(page);
+
+		if (page_from_dynamic_pool(page)) {
+			list_del(&page->lru);
+			dynamic_pool_free_page(page);
+			continue;
+		}
+
 		if (!free_unref_page_prepare(page, pfn, 0)) {
 			list_del(&page->lru);
 			continue;
 		}
@@ -4734,6 +4747,13 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 	 */
 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
 
+	/* Try to allocate from the dpool before falling back to the buddy system */
+	if (dpool_enabled) {
+		page = dynamic_pool_alloc_page(alloc_gfp, order, alloc_flags);
+		if (page)
+			goto out;
+	}
+
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
 	if (likely(page))
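
Note (not part of the patch): a worked example of the gfp filter in
dpool_should_alloc(). GFP_HIGHUSER_MOVABLE includes GFP_USER (and thus
__GFP_IO | __GFP_FS) plus __GFP_HIGHMEM and __GFP_MOVABLE, so OR-ing
__GFP_IO | __GFP_FS back in before the comparison tolerates callers
that cleared only those two bits to avoid recursive reclaim:

	/*
	 * gfp = gfp_mask & GFP_HIGHUSER_MOVABLE;
	 * accept iff (gfp | __GFP_IO | __GFP_FS) == GFP_HIGHUSER_MOVABLE
	 *
	 * GFP_HIGHUSER_MOVABLE             -> accepted
	 * GFP_HIGHUSER_MOVABLE & ~__GFP_FS -> accepted (e.g. loop device
	 *                                     or xfs under memalloc_nofs_save())
	 * GFP_KERNEL                       -> rejected (lacks __GFP_HIGHMEM
	 *                                     and __GFP_MOVABLE)
	 */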