hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CDZZ
--------------------------------
Fall back to dpool if reliable memory is not enough.

Kswapd and direct reclaim will not be triggered by the failed reliable
allocation. Since a later normal memory allocation can trigger them,
there is no problem here.
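As a condensed sketch of the intended flow (try_reliable_alloc() is a
hypothetical stand-in for the first allocation attempt; only
mem_reliable_fallback_dpool() and the GFP_RELIABLE retry come from this
patch, and the rest of the allocator slowpath is omitted):

	struct page *alloc_with_fallback(gfp_t gfp, unsigned int order)
	{
		struct page *page;

	retry:
		/* Try reliable (mirrored) memory first when GFP_RELIABLE is set. */
		page = try_reliable_alloc(gfp, order);
		if (page)
			return page;

		/*
		 * Reliable memory is exhausted: if fallback is allowed and a
		 * dynamic pool could serve this request, drop GFP_RELIABLE and
		 * retry without waking kswapd or entering direct reclaim.
		 */
		if (mem_reliable_fallback_dpool(gfp, order)) {
			gfp &= ~GFP_RELIABLE;
			goto retry;
		}

		return NULL;
	}

Note the retry can fire at most once: after GFP_RELIABLE is cleared,
mem_reliable_fallback_dpool() returns false on the next pass.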
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 include/linux/dynamic_pool.h |  6 ++++++
 mm/dynamic_pool.c            |  4 ++--
 mm/page_alloc.c              | 27 +++++++++++++++++++++++++++
 3 files changed, 35 insertions(+), 2 deletions(-)
diff --git a/include/linux/dynamic_pool.h b/include/linux/dynamic_pool.h
index d6ef9214c586..1d41c6a853c3 100644
--- a/include/linux/dynamic_pool.h
+++ b/include/linux/dynamic_pool.h
@@ -111,6 +111,7 @@ void dynamic_pool_bind_file(struct hugetlbfs_inode_info *p, struct hstate *h);
 void dynamic_pool_unbind_file(struct hugetlbfs_inode_info *p);
 int dynamic_pool_hugetlb_acct_memory(struct hstate *h, long delta,
				      struct hugetlbfs_inode_info *p);
+bool dynamic_pool_should_alloc(gfp_t gfp_mask, unsigned int order);
 struct folio *dynamic_pool_alloc_hugepage(struct hugetlbfs_inode_info *p,
					   struct hstate *h, bool reserved);
 void dynamic_pool_free_hugepage(struct folio *folio, bool restore_reserve);
@@ -186,6 +187,11 @@ static inline int dynamic_pool_hugetlb_acct_memory(struct hstate *h, long delta,
	return -ENOMEM;
 }
 
+static inline bool dynamic_pool_should_alloc(gfp_t gfp_mask, unsigned int order)
+{
+	return false;
+}
+
 static inline struct folio *dynamic_pool_alloc_hugepage(struct hugetlbfs_inode_info *p,
							 struct hstate *h, bool reserved)
 {
diff --git a/mm/dynamic_pool.c b/mm/dynamic_pool.c
index ce1225b85a2e..751a2dcc6816 100644
--- a/mm/dynamic_pool.c
+++ b/mm/dynamic_pool.c
@@ -682,7 +682,7 @@ int dynamic_pool_can_attach(struct task_struct *tsk, struct mem_cgroup *memcg)
	return ret;
 }
 
-static bool dpool_should_alloc(gfp_t gfp_mask, unsigned int order)
+bool dynamic_pool_should_alloc(gfp_t gfp_mask, unsigned int order)
 {
	gfp_t gfp = gfp_mask & GFP_HIGHUSER_MOVABLE;
@@ -719,7 +719,7 @@ struct page *dynamic_pool_alloc_page(gfp_t gfp, unsigned int order,
	if (!dpool_enabled)
		return NULL;
 
-	if (!dpool_should_alloc(gfp, order))
+	if (!dynamic_pool_should_alloc(gfp, order))
		return NULL;
 
	dpool = dpool_get_from_task(current);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b063dd0d456d..815b0c0212fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4098,6 +4098,17 @@ static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
		return;
	}
 }
+
+static inline bool mem_reliable_fallback_dpool(gfp_t gfp_mask, unsigned int order)
+{
+	if (!reliable_allow_fb_enabled())
+		return false;
+
+	if (!(gfp_mask & GFP_RELIABLE))
+		return false;
+
+	return dynamic_pool_should_alloc(gfp_mask & ~GFP_RELIABLE, order);
+}
 #else
 static inline struct zone *mem_reliable_fallback_zone(gfp_t gfp_mask,
						       struct alloc_context *ac)
@@ -4106,6 +4117,10 @@ static inline struct zone *mem_reliable_fallback_zone(gfp_t gfp_mask,
 }
 static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
						   struct alloc_context *ac) {}
+static inline bool mem_reliable_fallback_dpool(gfp_t gfp_mask, unsigned int order)
+{
+	return false;
+}
 #endif
 
 static inline struct page *
@@ -4765,6 +4780,18 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
	if (likely(page))
		goto out;
 
+	/*
+	 * Fall back to dpool if mirrored memory is not enough.
+	 *
+	 * Kswapd and direct reclaim will not be triggered here. Since a
+	 * later normal memory allocation can trigger them, there is no
+	 * problem here.
+	 */
+	if (mem_reliable_fallback_dpool(gfp, order)) {
+		gfp &= ~GFP_RELIABLE;
+		goto retry;
+	}
+
	alloc_gfp = gfp;
	ac.spread_dirty_pages = false;
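For reference, an illustrative caller (not part of this patch) that would
exercise the new path; the GFP combination is an assumption based on
dynamic_pool_should_alloc() masking the request against GFP_HIGHUSER_MOVABLE:

	/*
	 * Hypothetical example: request a reliable page. If mirrored memory
	 * is exhausted and reliable fallback is enabled, the allocator
	 * transparently retries the request against the dynamic pool.
	 */
	struct page *page;

	page = __alloc_pages(GFP_HIGHUSER_MOVABLE | GFP_RELIABLE, 0,
			     numa_node_id(), NULL);
	if (!page)
		return -ENOMEM;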