From: Ma Wupeng <mawupeng1@huawei.com>
Revert to bring MPOL_LOCAL back.
changelog since v1:
- bugfix in __mpol_equal
Ma Wupeng (1):
  Revert "mm/mempolicy: don't handle MPOL_LOCAL like a fake MPOL_PREFERRED policy"
 include/uapi/linux/mempolicy.h |   1 +
 mm/mempolicy.c                 | 138 +++++++++++++++++++--------------
 2 files changed, 81 insertions(+), 58 deletions(-)
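A note on the changelog entry above: the only functional difference between the v2 patch that follows and the earlier posting appended after it lies in __mpol_equal(). MPOL_PREFERRED_MANY keeps its allowed nodes in v.nodes rather than v.preferred_node, so comparing two such policies by preferred_node reads the wrong union member. A simplified sketch of the corrected v2 comparison (kernel context assumed, surrounding code elided):

	switch (a->mode) {
	case MPOL_BIND:
	case MPOL_PREFERRED_MANY:	/* v2: compared by full nodemask */
	case MPOL_INTERLEAVE:
		return !!nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		/* a's ->flags is the same as b's at this point */
		if (a->flags & MPOL_F_LOCAL)
			return true;	/* both mean local allocation */
		return a->v.preferred_node == b->v.preferred_node;
	}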
From: Ma Wupeng <mawupeng1@huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8PGB8
CVE: NA
---------------------------------------
This reverts commit d634f94d18f323f3e037d1094b70ba9db3196140.
Revert this commit to bring MPOL_LOCAL back.
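For context, an illustration that is not part of the patch: MPOL_LOCAL is the user-visible policy that allocates memory on the node of the CPU triggering the allocation. A minimal userspace sketch, assuming libnuma's <numaif.h> and linking with -lnuma:

	#include <numaif.h>	/* set_mempolicy(), MPOL_LOCAL */
	#include <stdio.h>

	int main(void)
	{
		/* MPOL_LOCAL takes no nodemask; new pages are allocated
		 * on the node of the CPU that faults them in. */
		if (set_mempolicy(MPOL_LOCAL, NULL, 0) != 0) {
			perror("set_mempolicy(MPOL_LOCAL)");
			return 1;
		}
		puts("local allocation policy installed");
		return 0;
	}

As the diff below shows, the kernel translates this request back into MPOL_PREFERRED with the internal MPOL_F_LOCAL flag.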
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 include/uapi/linux/mempolicy.h |   1 +
 mm/mempolicy.c                 | 138 +++++++++++++++++++--------------
 2 files changed, 81 insertions(+), 58 deletions(-)
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 7c4ffc207f67..fb94925dbb8e 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -59,6 +59,7 @@ enum {
  * are never OR'ed into the mode in mempolicy API arguments.
  */
 #define MPOL_F_SHARED	(1 << 0)	/* identify shared policies */
+#define MPOL_F_LOCAL	(1 << 1)	/* preferred local allocation */
 #define MPOL_F_MOF	(1 << 3)	/* this policy wants migrate on fault */
 #define MPOL_F_MORON	(1 << 4)	/* Migrate On protnone Reference On Node */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b58ec3f98896..cae370918383 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -125,7 +125,8 @@ enum zone_type policy_zone = 0;
 static struct mempolicy_wrapper default_policy = {
	.policy = {
		.refcnt = ATOMIC_INIT(1), /* never free it */
-		.mode = MPOL_LOCAL,
+		.mode = MPOL_PREFERRED,
+		.flags = MPOL_F_LOCAL,
	}
 };
@@ -240,9 +241,12 @@ static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 {
-	if (nodes_empty(*nodes))
-		return -EINVAL;
-	pol->v.preferred_node = first_node(*nodes);
+	if (!nodes)
+		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
+	else if (nodes_empty(*nodes))
+		return -EINVAL;			/* no allowed nodes */
+	else
+		pol->v.preferred_node = first_node(*nodes);
 	return 0;
 }
@@ -265,7 +269,8 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
 /*
  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
  * any, for the new policy. mpol_new() has already validated the nodes
- * parameter with respect to the policy mode and flags.
+ * parameter with respect to the policy mode and flags. But, we need to
+ * handle an empty nodemask with MPOL_PREFERRED here.
  *
  * Must be called holding task's alloc_lock to protect task's mems_allowed
  * and mempolicy. May also be called holding the mmap_lock for write.
@@ -275,14 +280,9 @@ static int mpol_set_nodemask(struct mempolicy *pol,
 {
 	int ret;
-	/*
-	 * Default (pol==NULL) resp. local memory policies are not a
-	 * subject of any remapping. They also do not need any special
-	 * constructor.
-	 */
-	if (!pol || pol->mode == MPOL_LOCAL)
+	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
+	if (pol == NULL)
 		return 0;
-
 	/* Check N_MEMORY */
 	nodes_and(nsc->mask1,
 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
@@ -291,18 +291,25 @@ static int mpol_set_nodemask(struct mempolicy *pol,
 	nodes_or(nsc->mask1, cdmmask, nsc->mask1);
 #endif
 	VM_BUG_ON(!nodes);
+	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
+		nodes = NULL;	/* explicit local allocation */
+	else {
+		if (pol->flags & MPOL_F_RELATIVE_NODES)
+			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
+		else
+			nodes_and(nsc->mask2, *nodes, nsc->mask1);
-	if (pol->flags & MPOL_F_RELATIVE_NODES)
-		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
-	else
-		nodes_and(nsc->mask2, *nodes, nsc->mask1);
+		if (mpol_store_user_nodemask(pol))
+			pol->w.user_nodemask = *nodes;
+		else
+			pol->w.cpuset_mems_allowed =
+				cpuset_current_mems_allowed;
+	}
-	if (mpol_store_user_nodemask(pol))
-		pol->w.user_nodemask = *nodes;
+	if (nodes)
+		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 	else
-		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
-
-	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
+		ret = mpol_ops[pol->mode].create(pol, NULL);
 	return ret;
 }
@@ -336,14 +343,13 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 			if (((flags & MPOL_F_STATIC_NODES) ||
 			     (flags & MPOL_F_RELATIVE_NODES)))
 				return ERR_PTR(-EINVAL);
-
-			mode = MPOL_LOCAL;
 		}
 	} else if (mode == MPOL_LOCAL) {
 		if (!nodes_empty(*nodes) ||
 		    (flags & MPOL_F_STATIC_NODES) ||
 		    (flags & MPOL_F_RELATIVE_NODES))
 			return ERR_PTR(-EINVAL);
+		mode = MPOL_PREFERRED;
 	} else if (nodes_empty(*nodes))
 		return ERR_PTR(-EINVAL);
 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
@@ -393,7 +399,25 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
 {
-	pol->w.cpuset_mems_allowed = *nodes;
+	nodemask_t tmp;
+
+	if (pol->flags & MPOL_F_STATIC_NODES) {
+		int node = first_node(pol->w.user_nodemask);
+
+		if (node_isset(node, *nodes)) {
+			pol->v.preferred_node = node;
+			pol->flags &= ~MPOL_F_LOCAL;
+		} else
+			pol->flags |= MPOL_F_LOCAL;
+	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
+		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
+		pol->v.preferred_node = first_node(tmp);
+	} else if (!(pol->flags & MPOL_F_LOCAL)) {
+		pol->v.preferred_node = node_remap(pol->v.preferred_node,
+						   pol->w.cpuset_mems_allowed,
+						   *nodes);
+		pol->w.cpuset_mems_allowed = *nodes;
+	}
 }
 /*
@@ -407,7 +431,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 {
 	if (!pol || pol->mode == MPOL_LOCAL)
 		return;
-	if (!mpol_store_user_nodemask(pol) &&
+	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 		return;
@@ -458,9 +482,6 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
-	[MPOL_LOCAL] = {
-		.rebind = mpol_rebind_default,
-	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_preferred_many,
		.rebind = mpol_rebind_preferred,
@@ -946,12 +967,10 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 	case MPOL_PREFERRED_MANY:
 		*nodes = p->v.nodes;
 		break;
-	case MPOL_LOCAL:
-		/* return empty node mask for local allocation */
-		break;
-
 	case MPOL_PREFERRED:
-		node_set(p->v.preferred_node, *nodes);
+		if (!(p->flags & MPOL_F_LOCAL))
+			node_set(p->v.preferred_node, *nodes);
+		/* else return empty node mask for local allocation */
 		break;
 	default:
 		BUG();
@@ -2031,7 +2050,7 @@ static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
 	struct mempolicy_wrapper *warpper;
 	warpper = container_of(policy, struct mempolicy_wrapper, policy);
-	if (policy->mode == MPOL_PREFERRED) {
+	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) {
 		nd = policy->v.preferred_node;
 	} else {
 		/*
@@ -2082,11 +2101,14 @@ unsigned int mempolicy_slab_node(void)
 		return node;
 	policy = current->mempolicy;
-	if (!policy)
+	if (!policy || policy->flags & MPOL_F_LOCAL)
 		return node;
 	switch (policy->mode) {
 	case MPOL_PREFERRED:
+		/*
+		 * handled MPOL_F_LOCAL above
+		 */
 		return policy->v.preferred_node;
 	case MPOL_INTERLEAVE:
@@ -2108,8 +2130,6 @@ unsigned int mempolicy_slab_node(void)
 				&policy->v.nodes);
 		return z->zone ? zone_to_nid(z->zone) : node;
 	}
-	case MPOL_LOCAL:
-		return node;
 	default:
 		BUG();
@@ -2233,19 +2253,17 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
 	mempolicy = current->mempolicy;
 	switch (mempolicy->mode) {
 	case MPOL_PREFERRED:
-		nid = mempolicy->v.preferred_node;
+		if (mempolicy->flags & MPOL_F_LOCAL)
+			nid = numa_node_id();
+		else
+			nid = mempolicy->v.preferred_node;
 		init_nodemask_of_node(mask, nid);
 		break;
 	case MPOL_PREFERRED_MANY:
 	case MPOL_BIND:
 	case MPOL_INTERLEAVE:
-		*mask = mempolicy->v.nodes;
-		break;
-
-	case MPOL_LOCAL:
-		nid = numa_node_id();
-		init_nodemask_of_node(mask, nid);
+		*mask = mempolicy->v.nodes;
 		break;
 	default:
@@ -2396,7 +2414,7 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
		 * If the policy is interleave or does not allow the current
		 * node in its nodemask, we allocate the standard way.
		 */
-		if (pol->mode == MPOL_PREFERRED)
+		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
			hpage_node = pol->v.preferred_node;
		nmask = policy_nodemask(gfp, pol);
@@ -2544,13 +2562,14 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 	switch (a->mode) {
 	case MPOL_BIND:
+	case MPOL_PREFERRED_MANY:
 	case MPOL_INTERLEAVE:
 		return !!nodes_equal(a->v.nodes, b->v.nodes);
 	case MPOL_PREFERRED:
-	case MPOL_PREFERRED_MANY:
+		/* a's ->flags is the same as b's */
+		if (a->flags & MPOL_F_LOCAL)
+			return true;
 		return a->v.preferred_node == b->v.preferred_node;
-	case MPOL_LOCAL:
-		return true;
 	default:
 		BUG();
 		return false;
@@ -2688,13 +2707,10 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 		break;
 	case MPOL_PREFERRED:
-		if (node_isset(curnid, pol->v.nodes))
-			goto out;
-		polnid = pol->v.preferred_node;
-		break;
-
-	case MPOL_LOCAL:
-		polnid = numa_node_id();
+		if (pol->flags & MPOL_F_LOCAL)
+			polnid = numa_node_id();
+		else
+			polnid = pol->v.preferred_node;
 		break;
 	case MPOL_BIND:
@@ -3060,6 +3076,9 @@ void numa_default_policy(void)
 /*
  * Parse and format mempolicy from/to strings
  */
+/*
+ * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
+ */
 static const char * const policy_modes[] =
 {
	[MPOL_DEFAULT]	= "default",
@@ -3138,6 +3157,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
		 */
		if (nodelist)
			goto out;
+		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
@@ -3182,7 +3202,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
-		new->mode = MPOL_LOCAL;
+		new->flags |= MPOL_F_LOCAL;
	/*
	 * Save nodes for contextualization: this will be used to "clone"
@@ -3229,10 +3249,12 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 	switch (mode) {
 	case MPOL_DEFAULT:
-	case MPOL_LOCAL:
 		break;
 	case MPOL_PREFERRED:
-		node_set(pol->v.preferred_node, nodes);
+		if (flags & MPOL_F_LOCAL)
+			mode = MPOL_LOCAL;
+		else
+			node_set(pol->v.preferred_node, nodes);
 		break;
 	case MPOL_PREFERRED_MANY:
 	case MPOL_BIND:
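A reviewer's note, not part of the patch: the test restored throughout the diff above, mode == MPOL_PREFERRED combined with a check of MPOL_F_LOCAL, is the whole of the reinstated representation. Every such site (policy_node(), mempolicy_slab_node(), alloc_pages_vma(), mpol_misplaced(), ...) effectively asks the question below; the helper itself is hypothetical and does not exist in the tree:

	/* Hypothetical reading aid, not in the patch: after this revert,
	 * "local allocation" means MPOL_PREFERRED plus MPOL_F_LOCAL. */
	static inline bool mpol_is_local(const struct mempolicy *pol)
	{
		return pol->mode == MPOL_PREFERRED &&
		       (pol->flags & MPOL_F_LOCAL);
	}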
From: Ma Wupeng <mawupeng1@huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8KS9A
CVE: NA
---------------------------------------
This reverts commit d634f94d18f323f3e037d1094b70ba9db3196140.
Revert this commit to bring MPOL_LOCAL back.
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 include/uapi/linux/mempolicy.h |   1 +
 mm/mempolicy.c                 | 134 +++++++++++++++++++--------------
 2 files changed, 80 insertions(+), 55 deletions(-)
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 7c4ffc207f67..fb94925dbb8e 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -59,6 +59,7 @@ enum {
  * are never OR'ed into the mode in mempolicy API arguments.
  */
 #define MPOL_F_SHARED	(1 << 0)	/* identify shared policies */
+#define MPOL_F_LOCAL	(1 << 1)	/* preferred local allocation */
 #define MPOL_F_MOF	(1 << 3)	/* this policy wants migrate on fault */
 #define MPOL_F_MORON	(1 << 4)	/* Migrate On protnone Reference On Node */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c99a5843429a..d1841e928401 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -128,7 +128,8 @@ enum zone_type policy_zone = 0;
 static struct mempolicy_wrapper default_policy = {
	.policy = {
		.refcnt = ATOMIC_INIT(1), /* never free it */
-		.mode = MPOL_LOCAL,
+		.mode = MPOL_PREFERRED,
+		.flags = MPOL_F_LOCAL,
	}
 };
@@ -243,9 +244,12 @@ static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 {
-	if (nodes_empty(*nodes))
-		return -EINVAL;
-	pol->v.preferred_node = first_node(*nodes);
+	if (!nodes)
+		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
+	else if (nodes_empty(*nodes))
+		return -EINVAL;			/* no allowed nodes */
+	else
+		pol->v.preferred_node = first_node(*nodes);
 	return 0;
 }
@@ -268,7 +272,8 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
 /*
  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
  * any, for the new policy. mpol_new() has already validated the nodes
- * parameter with respect to the policy mode and flags.
+ * parameter with respect to the policy mode and flags. But, we need to
+ * handle an empty nodemask with MPOL_PREFERRED here.
  *
  * Must be called holding task's alloc_lock to protect task's mems_allowed
  * and mempolicy. May also be called holding the mmap_lock for write.
@@ -278,14 +283,9 @@ static int mpol_set_nodemask(struct mempolicy *pol,
 {
 	int ret;
-	/*
-	 * Default (pol==NULL) resp. local memory policies are not a
-	 * subject of any remapping. They also do not need any special
-	 * constructor.
-	 */
-	if (!pol || pol->mode == MPOL_LOCAL)
+	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
+	if (pol == NULL)
 		return 0;
-
 	/* Check N_MEMORY */
 	nodes_and(nsc->mask1,
 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
@@ -294,18 +294,25 @@ static int mpol_set_nodemask(struct mempolicy *pol,
 	nodes_or(nsc->mask1, cdmmask, nsc->mask1);
 #endif
 	VM_BUG_ON(!nodes);
+	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
+		nodes = NULL;	/* explicit local allocation */
+	else {
+		if (pol->flags & MPOL_F_RELATIVE_NODES)
+			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
+		else
+			nodes_and(nsc->mask2, *nodes, nsc->mask1);
-	if (pol->flags & MPOL_F_RELATIVE_NODES)
-		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
-	else
-		nodes_and(nsc->mask2, *nodes, nsc->mask1);
+		if (mpol_store_user_nodemask(pol))
+			pol->w.user_nodemask = *nodes;
+		else
+			pol->w.cpuset_mems_allowed =
+				cpuset_current_mems_allowed;
+	}
-	if (mpol_store_user_nodemask(pol))
-		pol->w.user_nodemask = *nodes;
+	if (nodes)
+		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 	else
-		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
-
-	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
+		ret = mpol_ops[pol->mode].create(pol, NULL);
 	return ret;
 }
@@ -339,14 +346,13 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 			if (((flags & MPOL_F_STATIC_NODES) ||
 			     (flags & MPOL_F_RELATIVE_NODES)))
 				return ERR_PTR(-EINVAL);
-
-			mode = MPOL_LOCAL;
 		}
 	} else if (mode == MPOL_LOCAL) {
 		if (!nodes_empty(*nodes) ||
 		    (flags & MPOL_F_STATIC_NODES) ||
 		    (flags & MPOL_F_RELATIVE_NODES))
 			return ERR_PTR(-EINVAL);
+		mode = MPOL_PREFERRED;
 	} else if (nodes_empty(*nodes))
 		return ERR_PTR(-EINVAL);
 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
@@ -396,7 +402,25 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
 {
-	pol->w.cpuset_mems_allowed = *nodes;
+	nodemask_t tmp;
+
+	if (pol->flags & MPOL_F_STATIC_NODES) {
+		int node = first_node(pol->w.user_nodemask);
+
+		if (node_isset(node, *nodes)) {
+			pol->v.preferred_node = node;
+			pol->flags &= ~MPOL_F_LOCAL;
+		} else
+			pol->flags |= MPOL_F_LOCAL;
+	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
+		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
+		pol->v.preferred_node = first_node(tmp);
+	} else if (!(pol->flags & MPOL_F_LOCAL)) {
+		pol->v.preferred_node = node_remap(pol->v.preferred_node,
+						   pol->w.cpuset_mems_allowed,
+						   *nodes);
+		pol->w.cpuset_mems_allowed = *nodes;
+	}
 }
 /*
@@ -410,7 +434,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 {
 	if (!pol || pol->mode == MPOL_LOCAL)
 		return;
-	if (!mpol_store_user_nodemask(pol) &&
+	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 		return;
@@ -461,9 +485,6 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
-	[MPOL_LOCAL] = {
-		.rebind = mpol_rebind_default,
-	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_preferred_many,
		.rebind = mpol_rebind_preferred,
@@ -950,12 +971,10 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 	case MPOL_PREFERRED_MANY:
 		*nodes = p->v.nodes;
 		break;
-	case MPOL_LOCAL:
-		/* return empty node mask for local allocation */
-		break;
-
 	case MPOL_PREFERRED:
-		node_set(p->v.preferred_node, *nodes);
+		if (!(p->flags & MPOL_F_LOCAL))
+			node_set(p->v.preferred_node, *nodes);
+		/* else return empty node mask for local allocation */
 		break;
 	default:
 		BUG();
@@ -2035,7 +2054,7 @@ static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
 	struct mempolicy_wrapper *warpper;
 	warpper = container_of(policy, struct mempolicy_wrapper, policy);
-	if (policy->mode == MPOL_PREFERRED) {
+	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL)) {
 		nd = policy->v.preferred_node;
 	} else {
 		/*
@@ -2086,11 +2105,14 @@ unsigned int mempolicy_slab_node(void)
 		return node;
 	policy = current->mempolicy;
-	if (!policy)
+	if (!policy || policy->flags & MPOL_F_LOCAL)
 		return node;
 	switch (policy->mode) {
 	case MPOL_PREFERRED:
+		/*
+		 * handled MPOL_F_LOCAL above
+		 */
 		return policy->v.preferred_node;
 	case MPOL_INTERLEAVE:
@@ -2112,8 +2134,6 @@ unsigned int mempolicy_slab_node(void)
 				&policy->v.nodes);
 		return z->zone ? zone_to_nid(z->zone) : node;
 	}
-	case MPOL_LOCAL:
-		return node;
 	default:
 		BUG();
@@ -2237,19 +2257,17 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
 	mempolicy = current->mempolicy;
 	switch (mempolicy->mode) {
 	case MPOL_PREFERRED:
-		nid = mempolicy->v.preferred_node;
+		if (mempolicy->flags & MPOL_F_LOCAL)
+			nid = numa_node_id();
+		else
+			nid = mempolicy->v.preferred_node;
 		init_nodemask_of_node(mask, nid);
 		break;
 	case MPOL_PREFERRED_MANY:
 	case MPOL_BIND:
 	case MPOL_INTERLEAVE:
-		*mask = mempolicy->v.nodes;
-		break;
-
-	case MPOL_LOCAL:
-		nid = numa_node_id();
-		init_nodemask_of_node(mask, nid);
+		*mask = mempolicy->v.nodes;
 		break;
 	default:
@@ -2407,7 +2425,7 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
		 * If the policy is interleave or does not allow the current
		 * node in its nodemask, we allocate the standard way.
		 */
-		if (pol->mode == MPOL_PREFERRED)
+		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
			hpage_node = pol->v.preferred_node;
		nmask = policy_nodemask(gfp, pol);
@@ -2561,9 +2579,10 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 		return !!nodes_equal(a->v.nodes, b->v.nodes);
 	case MPOL_PREFERRED:
 	case MPOL_PREFERRED_MANY:
+		/* a's ->flags is the same as b's */
+		if (a->flags & MPOL_F_LOCAL)
+			return true;
 		return a->v.preferred_node == b->v.preferred_node;
-	case MPOL_LOCAL:
-		return true;
 	default:
 		BUG();
 		return false;
@@ -2701,11 +2720,10 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 		break;
 	case MPOL_PREFERRED:
-		polnid = pol->v.preferred_node;
-		break;
-
-	case MPOL_LOCAL:
-		polnid = numa_node_id();
+		if (pol->flags & MPOL_F_LOCAL)
+			polnid = numa_node_id();
+		else
+			polnid = pol->v.preferred_node;
 		break;
 	case MPOL_BIND:
@@ -3071,6 +3089,9 @@ void numa_default_policy(void)
 /*
  * Parse and format mempolicy from/to strings
  */
+/*
+ * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
+ */
 static const char * const policy_modes[] =
 {
	[MPOL_DEFAULT]	= "default",
@@ -3149,6 +3170,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
		 */
		if (nodelist)
			goto out;
+		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
@@ -3193,7 +3215,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
-		new->mode = MPOL_LOCAL;
+		new->flags |= MPOL_F_LOCAL;
	/*
	 * Save nodes for contextualization: this will be used to "clone"
@@ -3240,10 +3262,12 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 	switch (mode) {
 	case MPOL_DEFAULT:
-	case MPOL_LOCAL:
 		break;
 	case MPOL_PREFERRED:
-		node_set(pol->v.preferred_node, nodes);
+		if (flags & MPOL_F_LOCAL)
+			mode = MPOL_LOCAL;
+		else
+			node_set(pol->v.preferred_node, nodes);
 		break;
 	case MPOL_PREFERRED_MANY:
 	case MPOL_BIND:
Feedback: The patch(es) you sent to kernel@openeuler.org could not be converted to a PR!
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/G...
Failed Reason: the patch set's cover letter is missing
Suggested Solution: please resend the patch set to the mailing list together with its cover letter
Feedback: The patch(es) you sent to kernel@openeuler.org could not be converted to a PR!
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/E...
Failed Reason: applying the patch(es) failed: Patch failed at 0001 Revert "mm/mempolicy: don't handle MPOL_LOCAL like a fake MPOL_PREFERRED policy"
Suggested Solution: please check whether the patch(es) apply to the latest code of the expected branch