hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9H66Y
CVE: NA
--------------------------------
Add a new zram sysfs file, mem_cgroup, to set the memcg for a zram device. The memcg is also recorded in the zs_pool when the zs_pool is created.
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 drivers/block/zram/zram_drv.c | 107 +++++++++++++++++++++++++++++++++-
 drivers/block/zram/zram_drv.h |   4 ++
 include/linux/memcontrol.h   |   3 +
 include/linux/zsmalloc.h     |   3 +
 mm/Kconfig                   |   8 +++
 mm/memcontrol.c              |  27 +++++++++
 mm/zsmalloc.c                |  27 +++++++++
 7 files changed, 178 insertions(+), 1 deletion(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 955f0c4d358f..0c2c068b2374 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1265,6 +1265,106 @@ static DEVICE_ATTR_RO(bd_stat); #endif static DEVICE_ATTR_RO(debug_stat);
+#ifdef CONFIG_MEMCG_ZRAM +static inline int init_memcg(struct zram *zram, struct mem_cgroup *memcg) +{ + if (init_done(zram)) + return -EINVAL; + + if (zram->memcg) + css_put(&zram->memcg->css); + + zram->memcg = memcg; + + return 0; +} + +static inline void reset_memcg(struct zram *zram) +{ + struct mem_cgroup *memcg = zram->memcg; + + if (!memcg) + return; + + zram->memcg = NULL; + css_put(&memcg->css); +} + + +static ssize_t mem_cgroup_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zram *zram = dev_to_zram(dev); + struct mem_cgroup *memcg = zram->memcg; + + if (mem_cgroup_disabled() || !memcg) + return scnprintf(buf, PAGE_SIZE, "none\n"); + + if (!cgroup_path(memcg->css.cgroup, buf, PATH_MAX)) + return scnprintf(buf, PAGE_SIZE, "none\n"); + + return scnprintf(buf, PAGE_SIZE, "%s\n", buf); +} + +static ssize_t mem_cgroup_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct zram *zram = dev_to_zram(dev); + struct mem_cgroup *memcg; + char *kbuf; + size_t sz; + int ret = 0; + + if (mem_cgroup_disabled()) + return -EINVAL; + + kbuf = kmalloc(PATH_MAX, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + strlcpy(kbuf, buf, PATH_MAX); + sz = strlen(kbuf); + if (sz > 0 && kbuf[sz - 1] == '\n') + kbuf[sz - 1] = 0x00; + + if (!strcmp(kbuf, "none")) { + memcg = NULL; + } else { + memcg = memcg_get_from_path(kbuf, PATH_MAX); + if (!memcg) { + ret = -EINVAL; + goto out; + } + } + + down_write(&zram->init_lock); + ret = init_memcg(zram, memcg); + if (ret && memcg) + css_put(&memcg->css); + up_write(&zram->init_lock); + +out: + kfree(kbuf); + return ret ? 
ret : len; +} +static DEVICE_ATTR_RW(mem_cgroup); + +static inline struct zs_pool *zram_create_pool(struct zram *zram) +{ + return zs_create_pool_with_memcg(zram->disk->disk_name, + zram->memcg); +} +#else +static inline void reset_memcg(struct zram *zram) +{ +} + +static inline struct zs_pool *zram_create_pool(struct zram *zram) +{ + return zs_create_pool(zram->disk->disk_name); +} +#endif + static void zram_meta_free(struct zram *zram, u64 disksize) { size_t num_pages = disksize >> PAGE_SHIFT; @@ -1287,7 +1387,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) if (!zram->table) return false;
- zram->mem_pool = zs_create_pool(zram->disk->disk_name); + zram->mem_pool = zram_create_pool(zram); if (!zram->mem_pool) { vfree(zram->table); return false; @@ -2141,6 +2241,7 @@ static void zram_reset_device(struct zram *zram) zram->limit_pages = 0;
if (!init_done(zram)) { + reset_memcg(zram); up_write(&zram->init_lock); return; } @@ -2156,6 +2257,7 @@ static void zram_reset_device(struct zram *zram) zram_destroy_comps(zram); memset(&zram->stats, 0, sizeof(zram->stats)); reset_bdev(zram); + reset_memcg(zram);
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor); up_write(&zram->init_lock); @@ -2338,6 +2440,9 @@ static struct attribute *zram_disk_attrs[] = { #ifdef CONFIG_ZRAM_MULTI_COMP &dev_attr_recomp_algorithm.attr, &dev_attr_recompress.attr, +#endif +#ifdef CONFIG_MEMCG_ZRAM + &dev_attr_mem_cgroup.attr, #endif NULL, }; diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index eb13d0299f89..8987e77ac7ee 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -18,6 +18,7 @@ #include <linux/rwsem.h> #include <linux/zsmalloc.h> #include <linux/crypto.h> +#include <linux/memcontrol.h>
#include "zcomp.h"
@@ -142,5 +143,8 @@ struct zram { #ifdef CONFIG_ZRAM_MEMORY_TRACKING struct dentry *debugfs_dir; #endif +#ifdef CONFIG_MEMCG_ZRAM + struct mem_cgroup *memcg; +#endif }; #endif diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ef3a6a8e640f..2804701f75dd 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1325,6 +1325,9 @@ int mem_cgroup_force_empty(struct mem_cgroup *memcg); int memcg_get_swap_type(struct page *page); void memcg_remove_swapfile(int type);
+#ifdef CONFIG_MEMCG_ZRAM +struct mem_cgroup *memcg_get_from_path(char *path, size_t buflen); +#endif #else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0 diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index a48cd0ffe57d..2c09676f9178 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -41,6 +41,9 @@ struct zs_pool_stats { struct zs_pool;
struct zs_pool *zs_create_pool(const char *name); +#ifdef CONFIG_MEMCG_ZRAM +struct zs_pool *zs_create_pool_with_memcg(const char *name, void *memcg); +#endif void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags); diff --git a/mm/Kconfig b/mm/Kconfig index 0f9209cd969b..2642a05f89c1 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -521,6 +521,14 @@ config MEMCG_SWAP_QOS memcg swap control include memory force swapin, swapfile control and swap limit.
+config MEMCG_ZRAM + bool "Enable Memory Cgroup charge of zram usage" + depends on MEMCG_SWAP_QOS && ZRAM + depends on X86 || ARM64 + default n + help + Support to charge zram usage in memory cgroup. + config ETMEM_SCAN tristate "module: etmem page scan for etmem support" depends on ETMEM diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9007c3554771..575f382358e9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3628,6 +3628,33 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, } }
+#ifdef CONFIG_MEMCG_ZRAM +struct mem_cgroup *memcg_get_from_path(char *path, size_t buflen) +{ + struct mem_cgroup *memcg; + char *memcg_path; + + if (mem_cgroup_disabled()) + return NULL; + + memcg_path = kzalloc(buflen, GFP_KERNEL); + if (!memcg_path) + return NULL; + + for_each_mem_cgroup(memcg) { + cgroup_path(memcg->css.cgroup, memcg_path, buflen); + if (!strcmp(path, memcg_path) && css_tryget_online(&memcg->css)) { + mem_cgroup_iter_break(NULL, memcg); + break; + } + } + + kfree(memcg_path); + return memcg; +} +EXPORT_SYMBOL(memcg_get_from_path); +#endif + #ifdef CONFIG_MEMCG_KMEM static int memcg_online_kmem(struct mem_cgroup *memcg) { diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 6079f5625abb..85aba62d777d 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -57,6 +57,7 @@ #include <linux/wait.h> #include <linux/pagemap.h> #include <linux/fs.h> +#include <linux/memcontrol.h>
#define ZSPAGE_MAGIC 0x58
@@ -274,6 +275,9 @@ struct zs_pool { atomic_long_t isolated_pages; bool destroying; #endif +#ifdef CONFIG_MEMCG_ZRAM + struct mem_cgroup *memcg; +#endif };
struct zspage { @@ -2527,10 +2531,33 @@ struct zs_pool *zs_create_pool(const char *name) } EXPORT_SYMBOL_GPL(zs_create_pool);
+#ifdef CONFIG_MEMCG_ZRAM +static inline void zs_set_memcg(struct zs_pool *pool, void *memcg) +{ + if (pool) + pool->memcg = memcg; +} + +struct zs_pool *zs_create_pool_with_memcg(const char *name, void *memcg) +{ + struct zs_pool *pool = zs_create_pool(name); + + zs_set_memcg(pool, memcg); + + return pool; +} +EXPORT_SYMBOL_GPL(zs_create_pool_with_memcg); +#else +static inline void zs_set_memcg(struct zs_pool *pool, void *memcg) +{ +} +#endif + void zs_destroy_pool(struct zs_pool *pool) { int i;
+ zs_set_memcg(pool, NULL); zs_unregister_shrinker(pool); zs_unregister_migration(pool); zs_pool_stat_destroy(pool);