From: Zhou Guanghui <zhouguanghui1@huawei.com>
ascend inclusion
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------------------------
The following functions are used only in the ascend scenario: hugetlb_get_hstate, hugetlb_alloc_hugepage, hugetlb_insert_hugepage_pte and hugetlb_insert_hugepage_pte_by_pa. Guard them with CONFIG_ASCEND_FEATURES instead of CONFIG_ARM64, and provide static inline stubs for the !CONFIG_ASCEND_FEATURES case.

Also remove the unused interface hugetlb_insert_hugepage.
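
For reference, a minimal caller sketch of the interfaces that remain under
CONFIG_ASCEND_FEATURES (the function name, the PAGE_KERNEL protection and the
error handling below are illustrative assumptions, not part of this patch):

	static int example_map_hugepage(struct mm_struct *mm, unsigned long va)
	{
		struct page *hpage;
		int ret;

		/* Returns NULL on allocation failure, and also from the
		 * !CONFIG_ASCEND_FEATURES stub added by this patch. */
		hpage = hugetlb_alloc_hugepage(NUMA_NO_NODE);
		if (!hpage)
			return -ENOMEM;

		/* The !CONFIG_ASCEND_FEATURES stub returns -EPERM. */
		ret = hugetlb_insert_hugepage_pte(mm, va, PAGE_KERNEL, hpage);
		if (ret)
			put_page(hpage);	/* assumption: caller owns the reference */

		return ret;
	}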
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/hugetlb.h | 37 +++++++++++++++++++++++++++----------
 mm/hugetlb.c            | 37 +------------------------------------
 2 files changed, 28 insertions(+), 46 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index eefa6d42140d3..230f2eb6c4474 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -375,17 +375,31 @@ struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			pgoff_t idx);
 
-#ifdef CONFIG_ARM64
+#ifdef CONFIG_ASCEND_FEATURES
 const struct hstate *hugetlb_get_hstate(void);
 struct page *hugetlb_alloc_hugepage(int nid);
 int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
 				pgprot_t prot, struct page *hpage);
-#endif
 int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
 				unsigned long vir_addr,
 				pgprot_t prot, unsigned long phy_addr);
-int hugetlb_insert_hugepage(struct vm_area_struct *vma, unsigned long addr,
-			struct page *hpage, pgprot_t prot);
+#else
+static inline const struct hstate *hugetlb_get_hstate(void)
+{
+	return NULL;
+}
+
+static inline struct page *hugetlb_alloc_hugepage(int nid)
+{
+	return NULL;
+}
+
+static inline int hugetlb_insert_hugepage_pte(struct mm_struct *mm,
+			unsigned long addr, pgprot_t prot, struct page *hpage)
+{
+	return -EPERM;
+}
+#endif
 
 /* arch callback */
 int __init __alloc_bootmem_huge_page(struct hstate *h);
 
@@ -637,12 +651,6 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
 {
 }
 
-static inline int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
-				unsigned long vir_addr,
-				pgprot_t prot, unsigned long phy_addr)
-{
-	return 0;
-}
 #endif /* CONFIG_HUGETLB_PAGE */
 
 static inline spinlock_t *huge_pte_lock(struct hstate *h,
@@ -655,6 +663,15 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
 	return ptl;
 }
 
+#ifndef CONFIG_ASCEND_FEATURES
+static inline int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
+				unsigned long vir_addr,
+				pgprot_t prot, unsigned long phy_addr)
+{
+	return -EPERM;
+}
+#endif
+
 #ifdef CONFIG_ASCEND_SHARE_POOL
 pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, int writable);
 #endif
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 019dbae493e57..5c219f848db45 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5234,7 +5234,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
 	}
 }
 
-#ifdef CONFIG_ARM64
+#ifdef CONFIG_ASCEND_FEATURES
 const struct hstate *hugetlb_get_hstate(void)
 {
 	return &default_hstate;
@@ -5335,41 +5335,6 @@ int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
 }
 EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte_by_pa);
 
-int hugetlb_insert_hugepage(struct vm_area_struct *vma, unsigned long addr,
-			struct page *hpage, pgprot_t prot)
-{
-	struct hstate *h = hstate_vma(vma);
-	int anon_rmap = 0;
-	spinlock_t *ptl;
-	pte_t *ptep;
-	pte_t pte;
-	struct mm_struct *mm = vma->vm_mm;
-
-	ptep = hugetlb_huge_pte_alloc(mm, addr, huge_page_size(h));
-	if (!ptep)
-		return -ENXIO;
-
-	get_page(hpage);
-
-	ptl = huge_pte_lock(h, mm, ptep);
-	if (anon_rmap) {
-		ClearPagePrivate(hpage);
-		hugepage_add_new_anon_rmap(hpage, vma, addr);
-	} else {
-		page_dup_rmap(hpage, true);
-	}
-
-	pte = make_huge_pte(vma, hpage, ((vma->vm_flags & VM_WRITE)
-			&& (vma->vm_flags & VM_SHARED)));
-	set_huge_pte_at(mm, addr, ptep, pte);
-
-	hugetlb_count_add(pages_per_huge_page(h), mm);
-
-	spin_unlock(ptl);
-
-	return 0;
-}
-
 #ifdef CONFIG_ASCEND_CHARGE_MIGRATE_HUGEPAGES
 
 static int __init ascend_enable_charge_migrate_hugepages(char *s)
From: Zhou Guanghui <zhouguanghui1@huawei.com>
ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA
----------------------------------------------------
Currently, hugetlb_alloc_hugepage allocates from the normal hugepage pool first and falls back to temporary hugepages once the pool is exhausted. Add support for two additional modes: normal hugepages only, and temporary hugepages only.
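
The intended usage is sketched below (illustrative only; the nid values are
placeholders and error handling is omitted):

	struct page *page;

	/* Existing behaviour: normal hugepage pool first, then a temporary
	 * hugepage when the pool is exhausted. */
	page = hugetlb_alloc_hugepage(NUMA_NO_NODE, HUGETLB_ALLOC_NONE);

	/* Normal hugepages only: dequeue from the pre-allocated pool. */
	page = hugetlb_alloc_hugepage(0, HUGETLB_ALLOC_NORMAL);

	/* Temporary hugepages only: allocate a migrate hugepage. */
	page = hugetlb_alloc_hugepage(0, HUGETLB_ALLOC_TEMP);

	/* Flag bits outside HUGETLB_ALLOC_MASK make the call return NULL. */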
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/hugetlb.h | 11 +++++++++--
 mm/hugetlb.c            | 35 +++++++++++++++++++++++++++++++++--
 mm/share_pool.c         |  2 +-
 3 files changed, 43 insertions(+), 5 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 230f2eb6c4474..3160270fc57ab 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -376,8 +376,15 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			pgoff_t idx);
 
 #ifdef CONFIG_ASCEND_FEATURES
+#define HUGETLB_ALLOC_NONE		0x00
+#define HUGETLB_ALLOC_NORMAL		0x01	/* normal hugepage */
+#define HUGETLB_ALLOC_TEMP		0x02	/* temporary hugepage */
+#define HUGETLB_ALLOC_MASK		(HUGETLB_ALLOC_NONE | \
+					 HUGETLB_ALLOC_NORMAL | \
+					 HUGETLB_ALLOC_TEMP)
+
 const struct hstate *hugetlb_get_hstate(void);
-struct page *hugetlb_alloc_hugepage(int nid);
+struct page *hugetlb_alloc_hugepage(int nid, int flag);
 int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
 				pgprot_t prot, struct page *hpage);
 int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
@@ -389,7 +396,7 @@ static inline const struct hstate *hugetlb_get_hstate(void)
 	return NULL;
 }
 
-static inline struct page *hugetlb_alloc_hugepage(int nid)
+static inline struct page *hugetlb_alloc_hugepage(int nid, int flag)
 {
 	return NULL;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5c219f848db45..375157c7acadf 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5241,17 +5241,48 @@ const struct hstate *hugetlb_get_hstate(void)
 }
 EXPORT_SYMBOL_GPL(hugetlb_get_hstate);
 
+static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h,
+						gfp_t gfp_mask, int nid)
+{
+	struct page *page = NULL;
+
+	spin_lock(&hugetlb_lock);
+	if (h->free_huge_pages - h->resv_huge_pages > 0)
+		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL, NULL);
+	spin_unlock(&hugetlb_lock);
+
+	return page;
+}
+
 /*
  * Allocate hugepage without reserve
  */
-struct page *hugetlb_alloc_hugepage(int nid)
+struct page *hugetlb_alloc_hugepage(int nid, int flag)
 {
+	struct hstate *h = &default_hstate;
+	gfp_t gfp_mask = htlb_alloc_mask(h);
+	struct page *page = NULL;
+
 	VM_WARN_ON(nid < 0 || nid >= MAX_NUMNODES);
 
+	if (flag & ~HUGETLB_ALLOC_MASK)
+		return NULL;
+
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
-	return alloc_huge_page_node(&default_hstate, nid);
+	gfp_mask |= __GFP_THISNODE;
+
+	if (flag & HUGETLB_ALLOC_NORMAL)
+		page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid);
+	else if (flag & HUGETLB_ALLOC_TEMP) {
+		if (enable_charge_mighp)
+			gfp_mask |= __GFP_ACCOUNT;
+		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
+	} else
+		page = alloc_huge_page_node(h, nid);
+
+	return page;
 }
 EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);
 
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 924c896b8e944..4c9105722c35e 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -3133,7 +3133,7 @@ struct page *sp_alloc_pages(struct vm_struct *area, gfp_t mask,
 				unsigned int page_order, int node)
 {
 	if (area->flags & VM_HUGE_PAGES)
-		return hugetlb_alloc_hugepage(NUMA_NO_NODE);
+		return hugetlb_alloc_hugepage(NUMA_NO_NODE, HUGETLB_ALLOC_NONE);
 	else
 		return alloc_pages_node(node, mask, page_order);
 }
From: Zhou Guanghui <zhouguanghui1@huawei.com>
ascend inclusion
category: doc
bugzilla: NA
CVE: NA
-------------------------------------------------------
Since commit bd177f8f0548f, only allocations marked with __GFP_THISNODE can come from a CDM node.

Therefore, when allocating normal hugepages, also allow the dequeue path to take pages from the specified nid when __GFP_THISNODE is set in the gfp mask.
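
For example (illustrative; the nid value is an assumption), a normal-pool
allocation targeted at a CDM node now works because hugetlb_alloc_hugepage()
already ORs __GFP_THISNODE into the gfp mask before calling
dequeue_huge_page_nodemask():

	int cdm_nid = 1;	/* assumption: node 1 is a CDM node */
	struct page *page;

	/* Dequeues from the normal pool on the CDM node even without an
	 * MPOL_BIND policy, since __GFP_THISNODE is set internally. */
	page = hugetlb_alloc_hugepage(cdm_nid, HUGETLB_ALLOC_NORMAL);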
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/hugetlb.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 375157c7acadf..962a30ab86d85 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -918,7 +918,8 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
 	bool mbind_cdmnode = false;
 
 #ifdef CONFIG_COHERENT_DEVICE
-	if (is_cdm_node(nid) && mpol != NULL && mpol->mode == MPOL_BIND)
+	if (is_cdm_node(nid) && ((mpol != NULL && mpol->mode == MPOL_BIND) ||
+	    (gfp_mask & __GFP_THISNODE)))
 		mbind_cdmnode = true;
 #endif
 	zonelist = node_zonelist(nid, gfp_mask);