From: Lu Jialin <lujialin4@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4IMAK
CVE: NA
--------
This patch changes HIGH_ASYNC_RATIO_BASE from 10 to 100 and
HIGH_ASYNC_RATIO_GAP from 1 to 10. After this patch, users can set
high_async_ratio to any value from 0 to 99, which gives finer-grained
control over memcg async reclaim. If high_async_ratio is smaller than
HIGH_ASYNC_RATIO_GAP (10), async reclaim tries to reclaim all pages of
the memcg.
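Not part of the patch, for illustration only: a minimal userspace sketch
of how the reclaim threshold is derived from memory.high and
high_async_ratio with the new constants. The helper name safe_pages()
and the sample values below are made up for this example.

#include <stdio.h>

#define HIGH_ASYNC_RATIO_BASE	100
#define HIGH_ASYNC_RATIO_GAP	10

/*
 * Mirrors the calculation in get_reclaim_pages(): pages charged above
 * "safe_pages" are the async reclaim target; a ratio below the gap
 * yields 0, i.e. all charged pages become reclaim candidates.
 */
static unsigned long safe_pages(unsigned long high_pages, int high_async_ratio)
{
	int ratio = high_async_ratio < HIGH_ASYNC_RATIO_GAP ?
			0 : high_async_ratio - HIGH_ASYNC_RATIO_GAP;

	return high_pages * ratio / HIGH_ASYNC_RATIO_BASE;
}

int main(void)
{
	/*
	 * memory.high of 100000 pages, high_async_ratio = 80:
	 * async reclaim keeps usage near 70% of the high limit.
	 */
	printf("%lu\n", safe_pages(100000, 80));	/* 70000 */

	/* ratio below HIGH_ASYNC_RATIO_GAP: reclaim everything */
	printf("%lu\n", safe_pages(100000, 5));		/* 0 */
	return 0;
}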
Signed-off-by: Lu Jialin <lujialin4@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 mm/memcontrol.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e15389aeb641..0082942c9753 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -119,8 +119,8 @@ static bool do_memsw_account(void)
  */
 #define HIGH_ASYNC_RATIO_DEFAULT	0
-#define HIGH_ASYNC_RATIO_BASE		10
-#define HIGH_ASYNC_RATIO_GAP		1
+#define HIGH_ASYNC_RATIO_BASE		100
+#define HIGH_ASYNC_RATIO_GAP		10
 
 /*
  * Cgroups above their limits are maintained in a RB-Tree, independent of
@@ -2405,9 +2405,12 @@ static unsigned long reclaim_high(struct mem_cgroup *memcg,
 static unsigned long get_reclaim_pages(struct mem_cgroup *memcg)
 {
 	unsigned long nr_pages = page_counter_read(&memcg->memory);
-	int ratio = READ_ONCE(memcg->high_async_ratio) - HIGH_ASYNC_RATIO_GAP;
-	unsigned long safe_pages = READ_ONCE(memcg->memory.high) * ratio /
-				   HIGH_ASYNC_RATIO_BASE;
+	int ratio = READ_ONCE(memcg->high_async_ratio);
+	unsigned long safe_pages;
+
+	ratio = ratio < HIGH_ASYNC_RATIO_GAP ? 0 : ratio - HIGH_ASYNC_RATIO_GAP;
+	safe_pages = READ_ONCE(memcg->memory.high) * ratio /
+		     HIGH_ASYNC_RATIO_BASE;
 
 	return (nr_pages > safe_pages) ? (nr_pages - safe_pages) : MEMCG_CHARGE_BATCH;