这个地方要不要新加一个逻辑: 考虑后续新pid加入到cgroup里面情况;
我们memcg->ksm接口只是做使能; 1) 当前实现,遍历已有的使能ksm 2) 新加入pid,需要查看遍历使能ksm
On 2023/12/14 21:38, Nanyong Sun wrote:
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8OIQR
Add a control file "memory.ksm" to enable KSM per cgroup. Echoing 1 sets all tasks currently in the cgroup to KSM merge-any mode, which means KSM gets enabled for all VMAs of each process; echoing 0 disables KSM for them and unmerges the already-merged pages. Reading the file shows the above state and the KSM-related profits of this cgroup.
Signed-off-by: Nanyong Sun sunnanyong@huawei.com
.../admin-guide/cgroup-v1/memory.rst | 1 + mm/memcontrol.c | 110 +++++++++++++++++- 2 files changed, 109 insertions(+), 2 deletions(-)
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index ff456871bf4b..3fdb48435e8e 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -109,6 +109,7 @@ Brief summary of control files. memory.kmem.tcp.failcnt show the number of tcp buf memory usage hits limits memory.kmem.tcp.max_usage_in_bytes show max tcp buf memory usage recorded
memory.ksm set/show ksm merge any mode ==================================== ==========================================
- History
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8a881ab21f6c..be37c2dda785 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -73,6 +73,7 @@ #include <linux/uaccess.h>
#include <trace/events/vmscan.h> +#include <linux/ksm.h>
struct cgroup_subsys memory_cgrp_subsys __read_mostly; EXPORT_SYMBOL(memory_cgrp_subsys); @@ -230,10 +231,15 @@ enum res_type { iter != NULL; \ iter = mem_cgroup_iter(NULL, iter, NULL))
+static inline bool __task_is_dying(struct task_struct *task) +{
- return tsk_is_oom_victim(task) || fatal_signal_pending(task) ||
(task->flags & PF_EXITING);
+}
- static inline bool task_is_dying(void) {
- return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
(current->flags & PF_EXITING);
return __task_is_dying(current); }
/* Some nice accessors for the vmpressure. */
@@ -5010,6 +5016,98 @@ static int mem_cgroup_slab_show(struct seq_file *m, void *p) } #endif
+#ifdef CONFIG_KSM +static int memcg_set_ksm_for_tasks(struct mem_cgroup *memcg, bool enable) +{
- struct task_struct *task;
- struct mm_struct *mm;
- struct css_task_iter it;
- int ret = 0;
- css_task_iter_start(&memcg->css, CSS_TASK_ITER_PROCS, &it);
- while (!ret && (task = css_task_iter_next(&it))) {
if (__task_is_dying(task))
continue;
mm = get_task_mm(task);
if (!mm)
continue;
if (mmap_write_lock_killable(mm)) {
mmput(mm);
continue;
}
if (enable)
ret = ksm_enable_merge_any(mm);
else
ret = ksm_disable_merge_any(mm);
mmap_write_unlock(mm);
mmput(mm);
- }
- css_task_iter_end(&it);
- return ret;
+}
+static int memory_ksm_show(struct seq_file *m, void *v) +{
- unsigned long ksm_merging_pages = 0;
- unsigned long ksm_rmap_items = 0;
- long ksm_process_profits = 0;
- unsigned int tasks = 0;
- struct task_struct *task;
- struct mm_struct *mm;
- struct css_task_iter it;
- struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
- css_task_iter_start(&memcg->css, CSS_TASK_ITER_PROCS, &it);
- while ((task = css_task_iter_next(&it))) {
mm = get_task_mm(task);
if (!mm)
continue;
if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
tasks++;
ksm_rmap_items += mm->ksm_rmap_items;
ksm_merging_pages += mm->ksm_merging_pages;
ksm_process_profits += ksm_process_profit(mm);
mmput(mm);
- }
- css_task_iter_end(&it);
- seq_printf(m, "merge any tasks: %u\n", tasks);
- seq_printf(m, "ksm_rmap_items %lu\n", ksm_rmap_items);
- seq_printf(m, "ksm_merging_pages %lu\n", ksm_merging_pages);
- seq_printf(m, "ksm_process_profits %ld\n", ksm_process_profits);
- return 0;
+}
+static ssize_t memory_ksm_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
+{
- bool enable;
- int err;
- struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
- buf = strstrip(buf);
- if (!buf)
return -EINVAL;
- err = kstrtobool(buf, &enable);
- if (err)
return err;
- err = memcg_set_ksm_for_tasks(memcg, enable);
- if (err)
return err;
- return nbytes;
+} +#endif /* CONFIG_KSM */
static int memory_stat_show(struct seq_file *m, void *v);
static struct cftype mem_cgroup_legacy_files[] = {
@@ -5138,6 +5236,14 @@ static struct cftype mem_cgroup_legacy_files[] = { .write = mem_cgroup_reset, .read_u64 = mem_cgroup_read_u64, }, +#ifdef CONFIG_KSM
- {
.name = "ksm",
.flags = CFTYPE_NOT_ON_ROOT,
.write = memory_ksm_write,
.seq_show = memory_ksm_show,
- },
+#endif { }, /* terminate */ };
在 2023/12/14 20:55, Kefeng Wang 写道:
这个地方要不要新加一个逻辑: 考虑后续新pid加入到cgroup里面情况;
我们memcg->ksm接口只是做使能; 1) 当前实现,遍历已有的使能ksm 2) 新加入pid,需要查看遍历使能ksm
等姚路路来了让他做,包括其他KSM相关的几个需求一起做了
On 2023/12/14 21:38, Nanyong Sun wrote:
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8OIQR
Add control file "memory.ksm" to enable ksm per cgroup. Echo to 1 will set all tasks currently in the cgroup to ksm merge any mode, which means ksm gets enabled for all vma's of a process. Meanwhile echo to 0 will disable ksm for them and unmerge the merged pages. Cat the file will show the above state and ksm related profits of this cgroup.
Signed-off-by: Nanyong Sun sunnanyong@huawei.com
.../admin-guide/cgroup-v1/memory.rst | 1 + mm/memcontrol.c | 110 +++++++++++++++++- 2 files changed, 109 insertions(+), 2 deletions(-)
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index ff456871bf4b..3fdb48435e8e 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -109,6 +109,7 @@ Brief summary of control files. memory.kmem.tcp.failcnt show the number of tcp buf memory usage hits limits memory.kmem.tcp.max_usage_in_bytes show max tcp buf memory usage recorded
- memory.ksm set/show ksm merge any mode
====================================
1. History diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8a881ab21f6c..be37c2dda785 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -73,6 +73,7 @@ #include <linux/uaccess.h> #include <trace/events/vmscan.h> +#include <linux/ksm.h> struct cgroup_subsys memory_cgrp_subsys __read_mostly; EXPORT_SYMBOL(memory_cgrp_subsys); @@ -230,10 +231,15 @@ enum res_type { iter != NULL; \ iter = mem_cgroup_iter(NULL, iter, NULL)) +static inline bool __task_is_dying(struct task_struct *task) +{ + return tsk_is_oom_victim(task) || fatal_signal_pending(task) || + (task->flags & PF_EXITING); +}
static inline bool task_is_dying(void) { - return tsk_is_oom_victim(current) || fatal_signal_pending(current) || - (current->flags & PF_EXITING); + return __task_is_dying(current); } /* Some nice accessors for the vmpressure. */ @@ -5010,6 +5016,98 @@ static int mem_cgroup_slab_show(struct seq_file *m, void *p) } #endif +#ifdef CONFIG_KSM +static int memcg_set_ksm_for_tasks(struct mem_cgroup *memcg, bool enable) +{ + struct task_struct *task; + struct mm_struct *mm; + struct css_task_iter it; + int ret = 0;
+ css_task_iter_start(&memcg->css, CSS_TASK_ITER_PROCS, &it); + while (!ret && (task = css_task_iter_next(&it))) { + if (__task_is_dying(task)) + continue;
+ mm = get_task_mm(task); + if (!mm) + continue;
+ if (mmap_write_lock_killable(mm)) { + mmput(mm); + continue; + }
+ if (enable) + ret = ksm_enable_merge_any(mm); + else + ret = ksm_disable_merge_any(mm);
+ mmap_write_unlock(mm); + mmput(mm); + } + css_task_iter_end(&it);
+ return ret; +}
+static int memory_ksm_show(struct seq_file *m, void *v) +{ + unsigned long ksm_merging_pages = 0; + unsigned long ksm_rmap_items = 0; + long ksm_process_profits = 0; + unsigned int tasks = 0; + struct task_struct *task; + struct mm_struct *mm; + struct css_task_iter it; + struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
+ css_task_iter_start(&memcg->css, CSS_TASK_ITER_PROCS, &it); + while ((task = css_task_iter_next(&it))) { + mm = get_task_mm(task); + if (!mm) + continue;
+ if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) + tasks++;
+ ksm_rmap_items += mm->ksm_rmap_items; + ksm_merging_pages += mm->ksm_merging_pages; + ksm_process_profits += ksm_process_profit(mm); + mmput(mm); + } + css_task_iter_end(&it);
+ seq_printf(m, "merge any tasks: %u\n", tasks); + seq_printf(m, "ksm_rmap_items %lu\n", ksm_rmap_items); + seq_printf(m, "ksm_merging_pages %lu\n", ksm_merging_pages); + seq_printf(m, "ksm_process_profits %ld\n", ksm_process_profits); + return 0; +}
+static ssize_t memory_ksm_write(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off) +{ + bool enable; + int err; + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ buf = strstrip(buf); + if (!buf) + return -EINVAL;
+ err = kstrtobool(buf, &enable); + if (err) + return err;
+ err = memcg_set_ksm_for_tasks(memcg, enable); + if (err) + return err;
+ return nbytes; +} +#endif /* CONFIG_KSM */
static int memory_stat_show(struct seq_file *m, void *v); static struct cftype mem_cgroup_legacy_files[] = { @@ -5138,6 +5236,14 @@ static struct cftype mem_cgroup_legacy_files[] = { .write = mem_cgroup_reset, .read_u64 = mem_cgroup_read_u64, }, +#ifdef CONFIG_KSM + { + .name = "ksm", + .flags = CFTYPE_NOT_ON_ROOT, + .write = memory_ksm_write, + .seq_show = memory_ksm_show, + }, +#endif { }, /* terminate */ };
.