From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9UDJX
--------------------------------
Introduce get_vma_policy_node() to return a suitable nid for an order-0
page allocation based on the vma's memory policy. For MPOL_BIND and
MPOL_PREFERRED_MANY policies it also returns the policy's nodemask.
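
A typical caller is expected to look roughly like the sketch below
(illustrative only, not part of this patch; the order-0 allocation entry
point depends on the kernel baseline, e.g. __alloc_pages_nodemask() on
5.10-era trees, and vma/addr/gfp_mask stand for the caller's context):

	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	int nid;

	/* pick a preferred nid (and optional nodemask) from the vma policy */
	nid = get_vma_policy_node(vma, addr, gfp_mask, &mpol, &nodemask);
	/* order-0 allocation constrained by the returned nid/nodemask */
	page = __alloc_pages_nodemask(gfp_mask, 0, nid, nodemask);
	/* drop the reference taken by get_vma_policy() for shared policies */
	mpol_cond_put(mpol);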
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 include/linux/mempolicy.h |  4 ++++
 mm/mempolicy.c            | 23 +++++++++++++++++++++++
 2 files changed, 27 insertions(+)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index d4920e4a3e38..40abd3957294 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -161,6 +161,10 @@ extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
 
 extern unsigned int mempolicy_slab_node(void);
 
+int get_vma_policy_node(struct vm_area_struct *vma, unsigned long addr,
+		gfp_t gfp_flags, struct mempolicy **mpol,
+		nodemask_t **nodemask);
+
 extern enum zone_type policy_zone;
 
 static inline void check_highest_zone(enum zone_type k)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 81bd26fb661f..f7c1cda0a9fe 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2198,6 +2198,29 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
 		return interleave_nodes(pol);
 }
 
+/* get policy node for order-0 page */
+int get_vma_policy_node(struct vm_area_struct *vma, unsigned long addr,
+		gfp_t gfp_flags, struct mempolicy **mpol,
+		nodemask_t **nodemask)
+{
+	int nid, mode;
+
+	*mpol = get_vma_policy(vma, addr);
+	*nodemask = NULL;
+	mode = (*mpol)->mode;
+
+	if (unlikely(mode == MPOL_INTERLEAVE)) {
+		nid = interleave_nid(*mpol, vma, addr, PAGE_SHIFT);
+	} else {
+		nid = policy_node(gfp_flags, *mpol, numa_node_id());
+		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
+			*nodemask = &(*mpol)->v.nodes;
+	}
+
+	return nid;
+}
+EXPORT_SYMBOL_GPL(get_vma_policy_node);
+
 #ifdef CONFIG_HUGETLBFS
 /*
  * huge_node(@vma, @addr, @gfp_flags, @mpol)