From: Zhou Guanghui <zhouguanghui1@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4D63I
CVE: NA
----------------------------------------------------
The function hugetlb_alloc_hugepage() currently allocates from the static
hugepage pool first and, once that pool is exhausted, falls back to
allocating hugepages from the buddy system. This patch adds two further
modes: static hugepages only and buddy hugepages only.
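For illustration, a hypothetical caller could select a mode as in the
sketch below (not part of this patch; "nid" and "page" are placeholders,
and the flag semantics follow the definitions added to
include/linux/hugetlb.h):

	struct page *page;

	/* Default: static hugepage pool first, then buddy fallback */
	page = hugetlb_alloc_hugepage(nid, HUGETLB_ALLOC_NONE);

	/* Static hugepages only: NULL once the reserved pool is empty */
	page = hugetlb_alloc_hugepage(nid, HUGETLB_ALLOC_NORMAL);

	/* Buddy hugepages only: never dequeues from the static pool */
	page = hugetlb_alloc_hugepage(nid, HUGETLB_ALLOC_BUDDY);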
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Signed-off-by: Guo Mengqi <guomengqi3@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/hugetlb.h | 11 +++++++++--
 mm/hugetlb.c            | 35 +++++++++++++++++++++++++++++++++--
 2 files changed, 42 insertions(+), 4 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 830c41a5ca70e..de6cdfa51694c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -374,8 +374,15 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			pgoff_t idx);
 
 #ifdef CONFIG_ASCEND_FEATURES
+#define HUGETLB_ALLOC_NONE		0x00
+#define HUGETLB_ALLOC_NORMAL		0x01	/* normal hugepage */
+#define HUGETLB_ALLOC_BUDDY		0x02	/* buddy hugepage */
+#define HUGETLB_ALLOC_MASK		(HUGETLB_ALLOC_NONE | \
+					 HUGETLB_ALLOC_NORMAL | \
+					 HUGETLB_ALLOC_BUDDY)
+
 const struct hstate *hugetlb_get_hstate(void);
-struct page *hugetlb_alloc_hugepage(int nid);
+struct page *hugetlb_alloc_hugepage(int nid, int flag);
 int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
 				pgprot_t prot, struct page *hpage);
 #else
@@ -384,7 +391,7 @@ static inline const struct hstate *hugetlb_get_hstate(void)
 	return NULL;
 }
 
-static inline struct page *hugetlb_alloc_hugepage(int nid)
+static inline struct page *hugetlb_alloc_hugepage(int nid, int flag)
 {
 	return NULL;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3091c61bb63e3..907b1351b0f5f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5233,17 +5233,48 @@ const struct hstate *hugetlb_get_hstate(void)
 }
 EXPORT_SYMBOL_GPL(hugetlb_get_hstate);
 
+static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h,
+		gfp_t gfp_mask, int nid)
+{
+	struct page *page = NULL;
+
+	spin_lock(&hugetlb_lock);
+	if (h->free_huge_pages - h->resv_huge_pages > 0)
+		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL, NULL);
+	spin_unlock(&hugetlb_lock);
+
+	return page;
+}
+
 /*
  * Allocate hugepage without reserve
  */
-struct page *hugetlb_alloc_hugepage(int nid)
+struct page *hugetlb_alloc_hugepage(int nid, int flag)
 {
+	struct hstate *h = &default_hstate;
+	gfp_t gfp_mask = htlb_alloc_mask(h);
+	struct page *page = NULL;
+
 	VM_WARN_ON(nid < 0 || nid >= MAX_NUMNODES);
 
+	if (flag & ~HUGETLB_ALLOC_MASK)
+		return NULL;
+
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
-	return alloc_huge_page_node(&default_hstate, nid);
+	gfp_mask |= __GFP_THISNODE;
+
+	if (flag & HUGETLB_ALLOC_NORMAL)
+		page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid);
+	else if (flag & HUGETLB_ALLOC_BUDDY) {
+		if (enable_charge_mighp)
+			gfp_mask |= __GFP_ACCOUNT;
+		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
+	} else
+		page = alloc_huge_page_node(h, nid);
+
+	return page;
 }
 EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);
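For context, a hypothetical user of the extended interface might pair the
allocator with hugetlb_insert_hugepage_pte(), which is declared under the
same CONFIG_ASCEND_FEATURES guard. This is an illustrative sketch only:
the function name and the cleanup on insert failure are assumptions, not
part of this patch.

	static int example_map_buddy_hugepage(struct mm_struct *mm,
					      unsigned long addr, pgprot_t prot)
	{
		struct page *hpage;
		int ret;

		/* Buddy-only allocation on the local node */
		hpage = hugetlb_alloc_hugepage(NUMA_NO_NODE, HUGETLB_ALLOC_BUDDY);
		if (!hpage)
			return -ENOMEM;

		ret = hugetlb_insert_hugepage_pte(mm, addr, prot, hpage);
		if (ret)
			put_page(hpage);	/* drop the page on insert failure */

		return ret;
	}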