From: Zhou Guanghui <zhouguanghui1@huawei.com>
ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA
----------------------------------------------------
Currently, hugetlb_alloc_hugepage() allocates from normal hugepages first and, once normal hugepages are exhausted, falls back to temporary hugepages. Add a flag argument so that two additional modes are supported: allocate from normal hugepages only, or from temporary hugepages only.
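
For illustration, a caller could use the new flag argument as follows (a minimal sketch, not part of this patch; example_alloc_hugepage() is a hypothetical helper):

	/* Hypothetical caller, assuming the flags introduced below. */
	static struct page *example_alloc_hugepage(int nid)
	{
		struct page *page;

		/* Try pre-allocated (normal) hugepages only. */
		page = hugetlb_alloc_hugepage(nid, HUGETLB_ALLOC_NORMAL);
		if (page)
			return page;

		/* Fall back to a temporary hugepage explicitly. */
		return hugetlb_alloc_hugepage(nid, HUGETLB_ALLOC_TEMP);
	}

Passing HUGETLB_ALLOC_NONE keeps the original behaviour: normal hugepages first, then temporary hugepages.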
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/hugetlb.h | 11 +++++++++--
 mm/hugetlb.c            | 35 +++++++++++++++++++++++++++++++++--
 mm/share_pool.c         |  2 +-
 3 files changed, 43 insertions(+), 5 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 230f2eb6c4474..3160270fc57ab 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -376,8 +376,15 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			pgoff_t idx);
 
 #ifdef CONFIG_ASCEND_FEATURES
+#define HUGETLB_ALLOC_NONE		0x00
+#define HUGETLB_ALLOC_NORMAL		0x01	/* normal hugepage */
+#define HUGETLB_ALLOC_TEMP		0x02	/* temporary hugepage */
+#define HUGETLB_ALLOC_MASK		(HUGETLB_ALLOC_NONE | \
+					 HUGETLB_ALLOC_NORMAL | \
+					 HUGETLB_ALLOC_TEMP)
+
 const struct hstate *hugetlb_get_hstate(void);
-struct page *hugetlb_alloc_hugepage(int nid);
+struct page *hugetlb_alloc_hugepage(int nid, int flag);
 int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
 				pgprot_t prot, struct page *hpage);
 int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
@@ -389,7 +396,7 @@ static inline const struct hstate *hugetlb_get_hstate(void)
 	return NULL;
 }
 
-static inline struct page *hugetlb_alloc_hugepage(int nid)
+static inline struct page *hugetlb_alloc_hugepage(int nid, int flag)
 {
 	return NULL;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5c219f848db45..375157c7acadf 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5241,17 +5241,48 @@ const struct hstate *hugetlb_get_hstate(void)
 }
 EXPORT_SYMBOL_GPL(hugetlb_get_hstate);
 
+static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h,
+		gfp_t gfp_mask, int nid)
+{
+	struct page *page = NULL;
+
+	spin_lock(&hugetlb_lock);
+	if (h->free_huge_pages - h->resv_huge_pages > 0)
+		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL, NULL);
+	spin_unlock(&hugetlb_lock);
+
+	return page;
+}
+
 /*
  * Allocate hugepage without reserve
  */
-struct page *hugetlb_alloc_hugepage(int nid)
+struct page *hugetlb_alloc_hugepage(int nid, int flag)
 {
+	struct hstate *h = &default_hstate;
+	gfp_t gfp_mask = htlb_alloc_mask(h);
+	struct page *page = NULL;
+
 	VM_WARN_ON(nid < 0 || nid >= MAX_NUMNODES);
 
+	if (flag & ~HUGETLB_ALLOC_MASK)
+		return NULL;
+
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
-	return alloc_huge_page_node(&default_hstate, nid);
+	gfp_mask |= __GFP_THISNODE;
+
+	if (flag & HUGETLB_ALLOC_NORMAL)
+		page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid);
+	else if (flag & HUGETLB_ALLOC_TEMP) {
+		if (enable_charge_mighp)
+			gfp_mask |= __GFP_ACCOUNT;
+		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
+	} else
+		page = alloc_huge_page_node(h, nid);
+
+	return page;
 }
 EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);
 
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 924c896b8e944..4c9105722c35e 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -3133,7 +3133,7 @@ struct page *sp_alloc_pages(struct vm_struct *area, gfp_t mask,
 			    unsigned int page_order, int node)
 {
 	if (area->flags & VM_HUGE_PAGES)
-		return hugetlb_alloc_hugepage(NUMA_NO_NODE);
+		return hugetlb_alloc_hugepage(NUMA_NO_NODE, HUGETLB_ALLOC_NONE);
 	else
 		return alloc_pages_node(node, mask, page_order);
 }