hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9H66Y
CVE: NA
--------------------------------
In the previous patch, a zram device and its zs_pool can be bound to a
memcg. Now support charging the memory used by zspages and zram->table
to that memory cgroup. This helps limit memory usage in container
environments and prevents zram resource contention between containers.
Add a new file, memory.zram_usage_in_bytes, to show the zram usage.
The zram usage is also accounted in memory.usage_in_bytes. The stats
are now as follows:
  memory.zram_usage_in_bytes  = zram_usage
  memory.usage_in_bytes       = program_usage + zram_usage
  memory.memsw.usage_in_bytes = program_usage + swap_usage
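For example, a monitoring tool can read the two files and derive the
program usage. A minimal userspace sketch (the cgroup mount point and
the "example" cgroup name are assumptions for illustration; a cgroup
v1 memory hierarchy is required, since the charge is bypassed on the
default hierarchy):

  #include <stdio.h>

  /* Read a single u64 value from a cgroup control file (0 on error). */
  static unsigned long long read_ull(const char *path)
  {
          unsigned long long val = 0;
          FILE *f = fopen(path, "r");

          if (f) {
                  if (fscanf(f, "%llu", &val) != 1)
                          val = 0;
                  fclose(f);
          }
          return val;
  }

  int main(void)
  {
          /* Hypothetical cgroup path, for illustration only. */
          unsigned long long usage = read_ull(
                  "/sys/fs/cgroup/memory/example/memory.usage_in_bytes");
          unsigned long long zram = read_ull(
                  "/sys/fs/cgroup/memory/example/memory.zram_usage_in_bytes");

          printf("program_usage = %llu bytes\n", usage - zram);
          return 0;
  }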
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 drivers/block/zram/zram_drv.c | 32 +++++++++++++++++++--
 include/linux/memcontrol.h    |  5 ++++
 mm/memcontrol.c               | 52 +++++++++++++++++++++++++++++++++++
 mm/zsmalloc.c                 | 31 +++++++++++++++++++++
 4 files changed, 117 insertions(+), 3 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0c2c068b2374..a56a549ee92e 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1354,6 +1354,20 @@ static inline struct zs_pool *zram_create_pool(struct zram *zram)
 
 	return zs_create_pool_with_memcg(zram->disk->disk_name, zram->memcg);
 }
+
+static inline void zram_charge_memory(struct zram *zram, unsigned long size)
+{
+	unsigned long nr_pages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+	memcg_charge_zram(zram->memcg, nr_pages);
+}
+
+static inline void zram_uncharge_memory(struct zram *zram, unsigned long size)
+{
+	unsigned long nr_pages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+	memcg_uncharge_zram(zram->memcg, nr_pages);
+}
 #else
 static inline void reset_memcg(struct zram *zram)
 {
@@ -1363,11 +1377,20 @@ static inline struct zs_pool *zram_create_pool(struct zram *zram)
 {
 	return zs_create_pool(zram->disk->disk_name);
 }
+
+static inline void zram_charge_memory(struct zram *zram, unsigned long size)
+{
+}
+
+static inline void zram_uncharge_memory(struct zram *zram, unsigned long size)
+{
+}
 #endif
 
 static void zram_meta_free(struct zram *zram, u64 disksize)
 {
 	size_t num_pages = disksize >> PAGE_SHIFT;
+	unsigned long size = array_size(num_pages, sizeof(*zram->table));
 	size_t index;
 
 	/* Free all pages that are still in this zram device */
@@ -1376,14 +1399,15 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
 
 	zs_destroy_pool(zram->mem_pool);
 	vfree(zram->table);
+	zram_uncharge_memory(zram, size);
 }
 
 static bool zram_meta_alloc(struct zram *zram, u64 disksize)
 {
-	size_t num_pages;
+	size_t num_pages = disksize >> PAGE_SHIFT;
+	unsigned long size = array_size(num_pages, sizeof(*zram->table));
 
-	num_pages = disksize >> PAGE_SHIFT;
-	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
+	zram->table = vzalloc(size);
 	if (!zram->table)
 		return false;
 
@@ -1393,6 +1417,8 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
 		return false;
 	}
 
+	zram_charge_memory(zram, size);
+
 	if (!huge_class_size)
 		huge_class_size = zs_huge_class_size(zram->mem_pool);
 	return true;
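Note: the table charge above is sized from disksize: one table entry
per page of disksize, rounded up to whole pages by ALIGN(). A toy
userspace sketch of the same arithmetic, assuming a 4K page size and a
16-byte table entry (the real sizeof(*zram->table) depends on the
kernel config):

  #include <stdio.h>

  int main(void)
  {
          unsigned long long page_size = 4096;          /* assumed */
          unsigned long long entry_size = 16;           /* assumed */
          unsigned long long disksize = 1ULL << 30;     /* 1 GiB device */
          unsigned long long num_pages = disksize / page_size;
          unsigned long long size = num_pages * entry_size; /* 4 MiB table */
          /* ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT, as in zram_charge_memory() */
          unsigned long long nr_pages = (size + page_size - 1) / page_size;

          printf("table pages charged: %llu\n", nr_pages); /* 1024 */
          return 0;
  }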
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2804701f75dd..450300805b21 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -248,6 +248,9 @@ struct obj_cgroup {
 struct swap_device {
 	unsigned long max;
 	int type;
+#ifdef CONFIG_MEMCG_ZRAM
+	atomic64_t zram_usage;
+#endif
 };
 
 /*
@@ -1327,6 +1330,8 @@ void memcg_remove_swapfile(int type);
 
 #ifdef CONFIG_MEMCG_ZRAM
 struct mem_cgroup *memcg_get_from_path(char *path, size_t buflen);
+void memcg_charge_zram(struct mem_cgroup *memcg, unsigned int nr_pages);
+void memcg_uncharge_zram(struct mem_cgroup *memcg, unsigned int nr_pages);
 #endif
 
 #else /* CONFIG_MEMCG */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 575f382358e9..f7cdcdfa81b1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3653,6 +3653,49 @@ struct mem_cgroup *memcg_get_from_path(char *path, size_t buflen)
 	return memcg;
 }
 EXPORT_SYMBOL(memcg_get_from_path);
+
+static inline void memcg_zram_usage_init(struct mem_cgroup *memcg)
+{
+	atomic64_set(&memcg->swap_dev->zram_usage, 0);
+}
+
+void memcg_charge_zram(struct mem_cgroup *memcg, unsigned int nr_pages)
+{
+	if (mem_cgroup_disabled() || !memcg)
+		return;
+
+	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+		return;
+
+	page_counter_charge(&memcg->memory, nr_pages);
+	atomic_long_add(nr_pages, &memcg->swap_dev->zram_usage);
+}
+EXPORT_SYMBOL_GPL(memcg_charge_zram);
+
+void memcg_uncharge_zram(struct mem_cgroup *memcg, unsigned int nr_pages)
+{
+	if (mem_cgroup_disabled() || !memcg)
+		return;
+
+	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+		return;
+
+	page_counter_uncharge(&memcg->memory, nr_pages);
+	atomic_long_sub(nr_pages, &memcg->swap_dev->zram_usage);
+}
+EXPORT_SYMBOL_GPL(memcg_uncharge_zram);
+
+static u64 mem_cgroup_zram_usage(struct cgroup_subsys_state *css,
+				 struct cftype *cft)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+	return (u64)atomic64_read(&memcg->swap_dev->zram_usage) * PAGE_SIZE;
+}
+#else
+static inline void memcg_zram_usage_init(struct mem_cgroup *memcg)
+{
+}
 #endif
 
 #ifdef CONFIG_MEMCG_KMEM
@@ -4251,6 +4294,8 @@ static void memcg_swap_device_init(struct mem_cgroup *memcg,
 		WRITE_ONCE(memcg->swap_dev->type,
 			   READ_ONCE(parent->swap_dev->type));
 	}
+
+	memcg_zram_usage_init(memcg);
 }
 
 u64 memcg_swapmax_read(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -6247,6 +6292,13 @@ static struct cftype mem_cgroup_legacy_files[] = {
 		.write = memcg_swapfile_write,
 		.seq_show = memcg_swapfile_read,
 	},
+#ifdef CONFIG_MEMCG_ZRAM
+	{
+		.name = "zram_usage_in_bytes",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.read_u64 = mem_cgroup_zram_usage,
+	},
+#endif
 #endif
 	{
 		.name = "high_async_ratio",
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 85aba62d777d..934101f9f09e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -935,6 +935,35 @@ static int trylock_zspage(struct zspage *zspage)
 	return 0;
 }
 
+#ifdef CONFIG_MEMCG_ZRAM
+static inline void zs_charge_memory(struct zs_pool *pool,
+				    unsigned long nr_pages)
+{
+	/*
+	 * Since only zram configures memcg for zs_pool,
+	 * charge the memory in zram usage.
+	 */
+	memcg_charge_zram(pool->memcg, nr_pages);
+}
+
+static inline void zs_uncharge_memory(struct zs_pool *pool,
+				      unsigned long nr_pages)
+{
+	/* See zs_charge_memory() for detail */
+	memcg_uncharge_zram(pool->memcg, nr_pages);
+}
+#else
+static inline void zs_charge_memory(struct zs_pool *pool,
+				    unsigned long nr_pages)
+{
+}
+
+static inline void zs_uncharge_memory(struct zs_pool *pool,
+				      unsigned long nr_pages)
+{
+}
+#endif
+
 static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 			  struct zspage *zspage)
 {
@@ -965,6 +994,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 	zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
 	atomic_long_sub(class->pages_per_zspage,
 			&pool->pages_allocated);
+	zs_uncharge_memory(pool, class->pages_per_zspage);
 }
 
 static void free_zspage(struct zs_pool *pool, struct size_class *class,
@@ -1484,6 +1514,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage,
 			&pool->pages_allocated);
+	zs_charge_memory(pool, class->pages_per_zspage);
 	zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
 
 	/* We completely set up zspage so mark them as movable */
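On the zsmalloc side, note that the charging is zspage-granular:
zs_malloc() charges class->pages_per_zspage when a new zspage is
allocated and __free_zspage() uncharges the same amount, so the charged
total tracks pool->pages_allocated rather than the compressed bytes
stored. A toy sketch with made-up class geometry (pages_per_zspage and
objs_per_zspage are illustrative values, not taken from a real size
class):

  #include <stdio.h>

  int main(void)
  {
          unsigned long pages_per_zspage = 2;   /* assumed geometry */
          unsigned long objs_per_zspage = 56;   /* assumed geometry */
          unsigned long objects = 100;          /* objects stored */
          /* zspages needed, rounding up */
          unsigned long zspages =
                  (objects + objs_per_zspage - 1) / objs_per_zspage;

          printf("pages charged: %lu\n", zspages * pages_per_zspage);
          return 0;
  }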