hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8S9BY
CVE: NA
--------------------------------
A process in a memcg with a dpool is not allowed to migrate to another memcg with a different dpool. Otherwise, the pages cannot be guaranteed to be freed.
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 include/linux/dynamic_pool.h |  8 +++++++
 mm/dynamic_pool.c            | 46 ++++++++++++++++++++++++++++++++++++
 mm/memcontrol.c              |  4 ++++
 3 files changed, 58 insertions(+)
diff --git a/include/linux/dynamic_pool.h b/include/linux/dynamic_pool.h
index da83617c2636..2dd54145a31c 100644
--- a/include/linux/dynamic_pool.h
+++ b/include/linux/dynamic_pool.h
@@ -57,6 +57,8 @@ struct dynamic_pool {
 	KABI_RESERVE(1)
 };
+int dynamic_pool_can_attach(struct task_struct *tsk, struct mem_cgroup *memcg);
+
 void dynamic_pool_inherit(struct mem_cgroup *memcg);
 int dynamic_pool_destroy(struct cgroup *cgrp, bool *clear_css_online);
@@ -70,6 +72,12 @@ int dynamic_pool_reserve_hugepage(struct mem_cgroup *memcg,
 #else
 struct dynamic_pool {};
+static inline int dynamic_pool_can_attach(struct task_struct *tsk,
+					  struct mem_cgroup *memcg)
+{
+	return 0;
+}
+
 static inline void dynamic_pool_inherit(struct mem_cgroup *memcg)
 {
 }
diff --git a/mm/dynamic_pool.c b/mm/dynamic_pool.c
index 0c0e04d9ecc5..d05b38f6d8d5 100644
--- a/mm/dynamic_pool.c
+++ b/mm/dynamic_pool.c
@@ -73,6 +73,28 @@ static struct dynamic_pool *dpool_get_from_memcg(struct mem_cgroup *memcg)
 	return dpool;
 }
+static struct dynamic_pool *dpool_get_from_task(struct task_struct *tsk)
+{
+	struct dynamic_pool *dpool = NULL;
+	struct mem_cgroup *memcg;
+
+	if (!dpool_enabled)
+		return NULL;
+
+	rcu_read_lock();
+	do {
+		memcg = mem_cgroup_from_task(tsk);
+	} while (memcg && !css_tryget(&memcg->css));
+	rcu_read_unlock();
+	if (!memcg)
+		return NULL;
+
+	dpool = dpool_get_from_memcg(memcg);
+	css_put(&memcg->css);
+
+	return dpool;
+}
+
 /* === demote and promote function ==================================== */
 
 /*
@@ -346,6 +368,30 @@ static int dpool_promote_pool(struct dynamic_pool *dpool, int type)
 	return ret;
 }
+/* === allocation interface =========================================== */
+
+int dynamic_pool_can_attach(struct task_struct *tsk, struct mem_cgroup *memcg)
+{
+	struct dynamic_pool *src_dpool, *dst_dpool;
+	int ret = 0;
+
+	if (!dpool_enabled)
+		return 0;
+
+	src_dpool = dpool_get_from_task(tsk);
+	if (!src_dpool)
+		return 0;
+
+	dst_dpool = dpool_get_from_memcg(memcg);
+	if (dst_dpool != src_dpool)
+		ret = -EPERM;
+
+	dpool_put(src_dpool);
+	dpool_put(dst_dpool);
+
+	return ret;
+}
+
 /* === dynamic pool function ========================================== */
 
 static void dpool_dump_child_memcg(struct mem_cgroup *memcg, void *message)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 237893cb459d..7ab0eec942ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7239,6 +7239,10 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 	if (!p)
 		return 0;
+	ret = dynamic_pool_can_attach(p, memcg);
+	if (ret)
+		return ret;
+
 	/*
 	 * We are now committed to this value whatever it is. Changes in this
 	 * tunable will only affect upcoming migrations, not the current one.