hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CDZZ
--------------------------------
Several core page allocator functions are called directly by functions in dynamic_pool.c, which forces them to stay non-static and may hurt performance in some cases. Wrap these core functions in dpool_-prefixed helpers, use the wrappers instead, and make the core functions static again.
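In outline (identifiers as in the diff below; bodies condensed for illustration), each core function becomes static in mm/page_alloc.c and only a thin wrapper remains visible to dynamic_pool.c:

	/* mm/page_alloc.c: core helper is static again, eligible for inlining */
	static __always_inline bool free_pages_prepare(struct page *page,
				unsigned int order, fpi_t fpi_flags);

	#ifdef CONFIG_DYNAMIC_POOL
	/* non-static wrapper: the only entry point left for dynamic_pool.c */
	bool dpool_free_page_prepare(struct page *page)
	{
		return free_pages_prepare(page, 0, 0);
	}
	#endif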
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 mm/dynamic_pool.c | 16 ++++++++--------
 mm/internal.h     | 12 ++++++------
 mm/page_alloc.c   | 29 +++++++++++++++++++++++++----
 3 files changed, 39 insertions(+), 18 deletions(-)
diff --git a/mm/dynamic_pool.c b/mm/dynamic_pool.c
index 15a491a76a58..b1590362c2c9 100644
--- a/mm/dynamic_pool.c
+++ b/mm/dynamic_pool.c
@@ -272,7 +272,7 @@ static int dpool_demote_huge_page(struct pages_pool *src_pool,
 	clear_compound_page(page_folio(page), PMD_ORDER);
 	for (i = 0; i < nr_pages; i++) {
 		subpage = folio_page(folio, i);
-		free_pages_prepare(subpage, 0, 0);
+		dpool_free_page_prepare(subpage);
 		__SetPageDpool(subpage);
 		list_add_tail(&subpage->lru, &dst_pool->freelist);
 		dst_pool->free_pages++;
@@ -395,7 +395,7 @@ static int dpool_promote_huge_page(struct pages_pool *src_pool,
 	}
 
 	page = pfn_to_page(spage->start_pfn);
-	prep_new_page(page, PMD_ORDER, __GFP_COMP, 0);
+	dpool_prep_new_page(page, PMD_ORDER, __GFP_COMP, 0);
 	set_page_count(page, 0);
 	folio_change_private(page_folio(page), NULL);
 	__SetPageDpool(page);
@@ -616,7 +616,7 @@ static struct page *dpool_alloc_pcp_page(struct dynamic_pool *dpool)
 	pcp_pool->free_pages--;
 	pcp_pool->used_pages++;
 
-	if (check_new_page(page)) {
+	if (dpool_check_new_page(page)) {
 		SetPagePool(page);
 		goto retry;
 	}
@@ -643,7 +643,7 @@ static int dpool_free_pcp_page(struct dynamic_pool *dpool, struct page *page)
 	}
 
 	ClearPagePool(page);
-	if (!free_pages_prepare(page, 0, 0)) {
+	if (!dpool_free_page_prepare(page)) {
 		SetPagePool(page);
 		goto unlock;
 	}
@@ -756,7 +756,7 @@ struct page *dynamic_pool_alloc_page(gfp_t gfp, unsigned int order,
 	pool->free_pages--;
 	pool->used_pages++;
 
-	if (check_new_page(page)) {
+	if (dpool_check_new_page(page)) {
 		/* This is a bad page, treat it as a used pages */
 		SetPagePool(page);
 		goto retry;
@@ -769,7 +769,7 @@ struct page *dynamic_pool_alloc_page(gfp_t gfp, unsigned int order,
 put:
 	dpool_put(dpool);
 	if (page)
-		prep_new_page(page, order, gfp, alloc_flags);
+		dpool_prep_new_page(page, order, gfp, alloc_flags);
 
 	return page;
 }
@@ -796,7 +796,7 @@ void dynamic_pool_free_page(struct page *page)
 	spin_lock_irqsave(&dpool->lock, flags);
 
 	ClearPagePool(page);
-	if (!free_pages_prepare(page, 0, 0)) {
+	if (!dpool_free_page_prepare(page)) {
 		SetPagePool(page);
 		goto unlock;
 	}
@@ -1582,7 +1582,7 @@ static int dpool_fill_from_pagelist(struct dynamic_pool *dpool, void *arg)
 		set_page_count(page, 0);
 		page_mapcount_reset(page);
 
-		if (!free_pages_prepare(page, 0, 0)) {
+		if (!dpool_free_page_prepare(page)) {
 			pr_err("fill pool failed, check pages failed\n");
 			goto unlock;
 		}
diff --git a/mm/internal.h b/mm/internal.h
index eab2199d16c8..1580641d7a95 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -434,16 +434,16 @@ static inline void prep_compound_tail(struct page *head, int tail_idx)
 	set_page_private(p, 0);
 }
 
-typedef int __bitwise fpi_t;
 extern void prep_compound_page(struct page *page, unsigned int order);
 
 extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
-extern bool free_pages_prepare(struct page *page, unsigned int order,
-			fpi_t fpi_flags);
-extern int check_new_page(struct page *page);
-extern void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
-							unsigned int alloc_flags);
+#ifdef CONFIG_DYNAMIC_POOL
+extern bool dpool_free_page_prepare(struct page *page);
+extern int dpool_check_new_page(struct page *page);
+extern void dpool_prep_new_page(struct page *page, unsigned int order,
+				gfp_t gfp_flags, unsigned int alloc_flags);
+#endif
 extern int user_min_free_kbytes;
 
 extern void free_unref_page(struct page *page, unsigned int order);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 815b0c0212fd..10facd4d65ec 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1080,7 +1080,7 @@ static void kernel_init_pages(struct page *page, int numpages)
 	kasan_enable_current();
 }
 
-__always_inline bool free_pages_prepare(struct page *page,
+static __always_inline bool free_pages_prepare(struct page *page,
 			unsigned int order, fpi_t fpi_flags)
 {
 	int bad = 0;
@@ -1433,7 +1433,7 @@ static void check_new_page_bad(struct page *page)
 /*
  * This page is about to be returned from the page allocator
  */
-int check_new_page(struct page *page)
+static int check_new_page(struct page *page)
 {
 	if (likely(page_expected_state(page,
 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
@@ -1545,8 +1545,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	page_table_check_alloc(page, order);
 }
 
-void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
-							unsigned int alloc_flags)
+static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+							unsigned int alloc_flags)
 {
 	post_alloc_hook(page, order, gfp_flags);
 
@@ -1565,6 +1565,27 @@ void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 	clear_page_pfmemalloc(page);
 }
 
+#ifdef CONFIG_DYNAMIC_POOL
+/*
+ * Wrap the core functions with dpool_ prefix to avoid calling them directly.
+ */
+bool dpool_free_page_prepare(struct page *page)
+{
+	return free_pages_prepare(page, 0, 0);
+}
+
+int dpool_check_new_page(struct page *page)
+{
+	return check_new_page(page);
+}
+
+void dpool_prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+			 unsigned int alloc_flags)
+{
+	prep_new_page(page, order, gfp_flags, alloc_flags);
+}
+#endif
+
 /*
  * Go through the free lists for the given migratetype and remove
  * the smallest available page from the freelists