hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8S9BY
CVE: NA
--------------------------------
Add functions to allocate and free huge pages from a dynamic pool.
When a hugetlbfs file is bound to a dynamic pool, alloc_hugetlb_folio()
takes its huge page from the pool's freelist instead of the global free
pool, and free_huge_folio() returns the page to the pool.
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 include/linux/dynamic_pool.h | 14 ++++++
 mm/dynamic_pool.c            | 90 ++++++++++++++++++++++++++++++++++++
 mm/hugetlb.c                 | 22 +++++++++
 3 files changed, 126 insertions(+)
diff --git a/include/linux/dynamic_pool.h b/include/linux/dynamic_pool.h
index 3a404700e1b0..728193ef0c38 100644
--- a/include/linux/dynamic_pool.h
+++ b/include/linux/dynamic_pool.h
@@ -90,6 +90,9 @@ void dynamic_pool_bind_file(struct hugetlbfs_inode_info *p, struct hstate *h);
 void dynamic_pool_unbind_file(struct hugetlbfs_inode_info *p);
 int dynamic_pool_hugetlb_acct_memory(struct hstate *h, long delta,
 				     struct hugetlbfs_inode_info *p);
+struct folio *dynamic_pool_alloc_hugepage(struct hugetlbfs_inode_info *p,
+					  struct hstate *h, bool reserved);
+void dynamic_pool_free_hugepage(struct folio *folio, bool restore_reserve);
 
 void dynamic_pool_inherit(struct mem_cgroup *parent, struct mem_cgroup *memcg);
 int dynamic_pool_destroy(struct cgroup *cgrp, bool *clear_css_online);
@@ -147,6 +150,17 @@ static inline int dynamic_pool_hugetlb_acct_memory(struct hstate *h,
 					long delta,
 					struct hugetlbfs_inode_info *p)
 {
 	return -ENOMEM;
 }
+
+static inline struct folio *dynamic_pool_alloc_hugepage(struct hugetlbfs_inode_info *p,
+					struct hstate *h, bool reserved)
+{
+	return NULL;
+}
+
+static inline void dynamic_pool_free_hugepage(struct folio *folio,
+					bool restore_reserve)
+{
+}
 #endif
 
 static inline void dynamic_pool_inherit(struct mem_cgroup *parent,
diff --git a/mm/dynamic_pool.c b/mm/dynamic_pool.c
index 18eb03a192b7..8ed60c5f21dc 100644
--- a/mm/dynamic_pool.c
+++ b/mm/dynamic_pool.c
@@ -793,6 +793,96 @@ int dynamic_pool_hugetlb_acct_memory(struct hstate *h, long delta,
 
 	return ret;
 }
+struct folio *dynamic_pool_alloc_hugepage(struct hugetlbfs_inode_info *p,
+					  struct hstate *h, bool reserved)
+{
+	struct dynamic_pool *dpool;
+	struct pages_pool *pool;
+	struct folio *folio = NULL;
+	unsigned long flags;
+	int type;
+
+	if (!dpool_enabled)
+		return NULL;
+
+	dpool = p->dpool;
+	if (!dpool)
+		return NULL;
+
+	spin_lock_irqsave(&dpool->lock, flags);
+	if (!dpool->online)
+		goto unlock;
+
+	if (hstate_is_gigantic(h))
+		type = PAGES_POOL_1G;
+	else
+		type = PAGES_POOL_2M;
+	pool = &dpool->pool[type];
+
+	list_for_each_entry(folio, &pool->freelist, lru) {
+		if (folio_test_hwpoison(folio))
+			continue;
+
+		list_del(&folio->lru);
+		__folio_clear_dpool(folio);
+		folio_ref_unfreeze(folio, 1);
+		pool->free_huge_pages--;
+		pool->used_huge_pages++;
+		if (reserved) {
+			folio_set_hugetlb_restore_reserve(folio);
+			pool->resv_huge_pages--;
+		}
+		folio_set_pool(folio);
+		goto unlock;
+	}
+	folio = NULL;
+
+unlock:
+	spin_unlock_irqrestore(&dpool->lock, flags);
+
+	return folio;
+}
+
+void dynamic_pool_free_hugepage(struct folio *folio, bool restore_reserve)
+{
+	struct hstate *h = folio_hstate(folio);
+	struct dynamic_pool *dpool;
+	struct pages_pool *pool;
+	unsigned long flags;
+	int type;
+
+	if (!dpool_enabled)
+		return;
+
+	dpool = dpool_get_from_page(folio_page(folio, 0));
+	if (!dpool) {
+		pr_err("get dpool failed when free hugepage 0x%px\n", folio);
+		return;
+	}
+
+	spin_lock_irqsave(&dpool->lock, flags);
+	if (hstate_is_gigantic(h))
+		type = PAGES_POOL_1G;
+	else
+		type = PAGES_POOL_2M;
+	pool = &dpool->pool[type];
+
+	if (folio_test_hwpoison(folio))
+		goto unlock;
+
+	folio_clear_pool(folio);
+	__folio_set_dpool(folio);
+	list_add(&folio->lru, &pool->freelist);
+	pool->free_huge_pages++;
+	pool->used_huge_pages--;
+	if (restore_reserve)
+		pool->resv_huge_pages++;
+
+unlock:
+	spin_unlock_irqrestore(&dpool->lock, flags);
+	dpool_put(dpool);
+}
+
 /* === dynamic pool function ========================================== */
 
 static void dpool_dump_child_memcg(struct mem_cgroup *memcg, void *message)
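The pair above is a plain freelist handoff under dpool->lock. The
user-space sketch below models only that handoff so the invariant is
easy to see: a folio lives on exactly one side at a time, either parked
on the freelist or handed out, and hwpoisoned entries are skipped on
allocation and never re-added on free. All names here (fake_pool,
fake_folio, pool_alloc, pool_free) are illustrative stand-ins, not
kernel API, and the sketch deliberately omits the refcount handling the
real code does with folio_ref_unfreeze():

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_folio {
		struct fake_folio *next;
		bool poisoned;			/* models folio_test_hwpoison() */
	};

	struct fake_pool {
		pthread_mutex_t lock;		/* models dpool->lock */
		struct fake_folio *freelist;	/* models pool->freelist */
		long free_pages, used_pages;	/* models the pool counters */
	};

	/* Hand out the first healthy folio, skipping hwpoisoned ones. */
	static struct fake_folio *pool_alloc(struct fake_pool *p)
	{
		struct fake_folio **link, *f;

		pthread_mutex_lock(&p->lock);
		for (link = &p->freelist; (f = *link) != NULL; link = &f->next) {
			if (f->poisoned)
				continue;	/* leave it parked on the list */
			*link = f->next;	/* models list_del() */
			p->free_pages--;
			p->used_pages++;
			break;
		}
		pthread_mutex_unlock(&p->lock);
		return f;
	}

	/* Return a folio to the freelist head, unless it is poisoned. */
	static void pool_free(struct fake_pool *p, struct fake_folio *f)
	{
		pthread_mutex_lock(&p->lock);
		if (f->poisoned)
			goto out;		/* quarantined, never reused */
		f->next = p->freelist;		/* models list_add() */
		p->freelist = f;
		p->free_pages++;
		p->used_pages--;
	out:
		pthread_mutex_unlock(&p->lock);
	}

	int main(void)
	{
		struct fake_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
		struct fake_folio pages[3] = { { 0 } };
		struct fake_folio *f;
		int i;

		pages[1].poisoned = true;	/* must never be handed out */
		for (i = 0; i < 3; i++) {	/* seed the freelist */
			pages[i].next = pool.freelist;
			pool.freelist = &pages[i];
			pool.free_pages++;
		}
		while ((f = pool_alloc(&pool)))
			printf("got folio %d\n", (int)(f - pages));
		return 0;
	}

The skip-but-keep handling of poisoned entries mirrors the patch:
dynamic_pool_alloc_hugepage() steps over a hwpoisoned folio without
removing it, and dynamic_pool_free_hugepage() bails out before putting
one back on the freelist.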
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index def6959ef574..0c524a0f6de2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1947,6 +1947,14 @@ void free_huge_folio(struct folio *folio)
 				     pages_per_huge_page(h), folio);
 	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
 					   pages_per_huge_page(h), folio);
+
+	if (page_from_dynamic_pool(folio_page(folio, 0))) {
+		list_del(&folio->lru);
+		spin_unlock_irqrestore(&hugetlb_lock, flags);
+		dynamic_pool_free_hugepage(folio, restore_reserve);
+		return;
+	}
+
 	if (restore_reserve)
 		h->resv_huge_pages++;
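Note the ordering in the free_huge_folio() hook above: the folio is
unlinked from the active list while hugetlb_lock is still held, the lock
is dropped, and only then does dynamic_pool_free_hugepage() take
dpool->lock, so the two locks are never held together and no ordering
rule between them is created. A compilable toy of that hand-off pattern
(lock_a/lock_b and the list names are hypothetical, and the toy assumes
the object sits at the list head):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* plays hugetlb_lock */
	static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* plays dpool->lock  */

	struct obj { struct obj *next; };
	static struct obj *active_list;	/* protected by lock_a */
	static struct obj *pool_list;	/* protected by lock_b */

	static void move_to_pool(struct obj *o)
	{
		pthread_mutex_lock(&lock_a);
		active_list = o->next;		/* unlink under lock A (toy: o is head) */
		pthread_mutex_unlock(&lock_a);	/* drop A before touching B */

		pthread_mutex_lock(&lock_b);	/* the other subsystem's lock */
		o->next = pool_list;
		pool_list = o;
		pthread_mutex_unlock(&lock_b);
	}

	int main(void)
	{
		struct obj o = { 0 };

		active_list = &o;
		move_to_pool(&o);
		printf("pooled: %d\n", pool_list == &o);
		return 0;
	}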
@@ -3186,6 +3194,19 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 	if (ret)
 		goto out_uncharge_cgroup_reservation;
 
+	if (file_in_dynamic_pool(info)) {
+		bool reserved = false;
+
+		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg))
+			reserved = true;
+		folio = dynamic_pool_alloc_hugepage(info, h, reserved);
+		if (!folio)
+			goto out_uncharge_cgroup;
+		spin_lock_irq(&hugetlb_lock);
+		list_add(&folio->lru, &h->hugepage_activelist);
+		goto out;
+	}
+
 	spin_lock_irq(&hugetlb_lock);
 	/*
 	 * glb_chg is passed to indicate whether or not a page must be taken
@@ -3208,6 +3229,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 		/* Fall through */
 	}
+out:
 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
 	/* If allocation is not consuming a reservation, also store the
 	 * hugetlb_cgroup pointer on the page.
 	 */
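One design point worth calling out in the alloc_hugetlb_folio() change:
when the file belongs to a dynamic pool there is no fallback to the
global free pool. A NULL return from dynamic_pool_alloc_hugepage()
jumps straight into the function's existing goto unwind ladder at
out_uncharge_cgroup, so the pool's capacity acts as a hard limit. A
self-contained toy of that unwind idiom, with hypothetical helper
names, is below:

	#include <stdio.h>

	static int get_quota(void)  { return 1; }	/* pretend this succeeds */
	static void put_quota(void) { puts("quota returned"); }
	static void *get_page(void) { return NULL; }	/* pretend this fails */

	static void *alloc_with_unwind(void)
	{
		void *page;

		if (!get_quota())
			goto out_fail;
		page = get_page();
		if (!page)
			goto out_put_quota;	/* undo only what succeeded */
		return page;

	out_put_quota:
		put_quota();
	out_fail:
		return NULL;
	}

	int main(void)
	{
		if (!alloc_with_unwind())
			puts("allocation failed, resources unwound");
		return 0;
	}

Each label undoes exactly the acquisitions made before the failing
step, which is why the new dynamic-pool branch can bail out with a
single goto and still leave the cgroup charges balanced.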