NACK: split this into two patches; also, the static key is not needed.
________________________________
王克锋 Wang Kefeng Mobile: +86-50000020962(For Welink,eSpace Calls) Email: wangkefeng.wang@huawei.com
From: zhangpeng (AS) <zhangpeng362@huawei.com>
To: kernel <kernel@openeuler.org>
Cc: Wangkefeng (OS Kernel Lab) <wangkefeng.wang@huawei.com>; zhangpeng (AS) <zhangpeng362@huawei.com>
Date: 2024-04-20 17:52:05
Subject: [PATCH OLK-6.6 2/2] mm: convert mm's rss stats to use atomic mode
From: ZhangPeng <zhangpeng362@huawei.com>
maillist inclusion
category: performance
bugzilla: https://gitee.com/openeuler/kernel/issues/I9IA1I
CVE: NA
Reference: https://lore.kernel.org/all/20240418142008.2775308-1-zhangpeng362@huawei.com...
--------------------------------
Since commit f1a7941243c1 ("mm: convert mm's rss stats into percpu_counter"), the rss_stats have been converted to percpu_counter, which shrinks the error margin from (nr_threads * 64) to approximately (nr_cpus ^ 2). However, the new percpu allocation in mm_init() causes a performance regression on fork/exec/shell. Even after commit 14ef95be6f55 ("kernel/fork: group allocation/free of per-cpu counters for mm struct"), the performance of fork/exec/shell is still poor compared to previous kernel versions.
To mitigate this performance regression, we delay the allocation of percpu memory for rss_stats by converting mm's rss stats to use percpu_counter's atomic mode. For single-thread processes, rss_stat stays in atomic mode, which avoids the memory consumption and the performance regression caused by the percpu allocation. For multi-threaded processes, rss_stat is switched to percpu mode to reduce the error margin. The conversion from atomic mode to percpu mode happens only when the second thread is created.
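The lifecycle of a counter under this scheme is roughly the following. This is a minimal sketch, assuming the percpu_counter_atomic_*() and percpu_counter_switch_to_pcpu_many() helpers that patch 1/2 of this series introduces; it is not the patch's own code path:

#include <linux/mm_types.h>
#include <linux/percpu_counter.h>

/*
 * Sketch only: illustrates the intended mode switch around CLONE_THREAD.
 * percpu_counter_atomic_add() and percpu_counter_switch_to_pcpu_many() are
 * assumed from patch 1/2 of this series; everything else is mainline API.
 */
static void rss_stat_mode_sketch(struct mm_struct *mm)
{
	/*
	 * After mm_init(): no percpu memory has been allocated, so the
	 * counter is updated in atomic mode, without per-CPU batching.
	 */
	percpu_counter_atomic_add(&mm->rss_stat[MM_ANONPAGES], 1);

	/*
	 * When the second thread is created (CLONE_THREAD in copy_mm()),
	 * all NR_MM_COUNTERS counters are switched to percpu mode in one
	 * call; copy_mm() treats a failed switch as -ENOMEM.
	 */
	if (percpu_counter_switch_to_pcpu_many(mm->rss_stat, NR_MM_COUNTERS))
		return;

	/* From here on updates are batched per CPU, as before this series. */
	percpu_counter_add(&mm->rss_stat[MM_ANONPAGES], 1);
}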
Introduce the kernel command-line parameter mm_counter_atomic_mode so that the counters' atomic mode, which is enabled by default, can be turned off at boot.
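As registered by the early_param() hunk at the end of this patch, adding the parameter to the kernel command line switches atomic mode off; the surrounding options below are placeholders for illustration only:

    console=ttyS0 root=/dev/vda1 mm_counter_atomic_mode

With the parameter present, the static key is disabled and mm_init() again allocates the percpu counters up front for every process, i.e. the pre-patch behaviour.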
In the lmbench tests we see a 2% ~ 4% performance improvement for fork_proc/exec_proc/shell_proc and a 6.7% improvement for page_fault (measured before batch mode [1]).
The test results are as follows:
             base        base+revert        base+this patch
fork_proc    416.3ms     400.0ms  (3.9%)    398.6ms  (4.2%)
exec_proc    2095.9ms    2061.1ms (1.7%)    2047.7ms (2.3%)
shell_proc   3028.2ms    2954.7ms (2.4%)    2961.2ms (2.2%)
page_fault   0.3603ms    0.3358ms (6.8%)    0.3361ms (6.7%)
[1] https://lore.kernel.org/all/20240412064751.119015-1-wangkefeng.wang@huawei.c...
Suggested-by: Jan Kara <jack@suse.cz>
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/mm.h          | 61 ++++++++++++++++++++++++++++++++-----
 include/trace/events/kmem.h |  4 +--
 kernel/fork.c               | 28 +++++++++++++++--
 3 files changed, 81 insertions(+), 12 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 46c7b073824c..6ecec65e7cd3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2579,32 +2579,79 @@ static inline bool get_user_page_fast_only(unsigned long addr,
 /*
  * per-process(per-mm_struct) statistics.
  */
+DECLARE_STATIC_KEY_TRUE(mm_counter_atomic_enable);
+
+static inline bool mm_counter_in_pcpu_mode(struct percpu_counter *fbc)
+{
+	if (!static_branch_likely(&mm_counter_atomic_enable))
+		return true;
+	if (percpu_counter_initialized(fbc))
+		return true;
+	return false;
+}
+
 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
 {
-	return percpu_counter_read_positive(&mm->rss_stat[member]);
+	struct percpu_counter *fbc = &mm->rss_stat[member];
+
+	if (mm_counter_in_pcpu_mode(fbc))
+		return percpu_counter_read_positive(fbc);
+
+	return percpu_counter_atomic_read(fbc);
 }
 
 void mm_trace_rss_stat(struct mm_struct *mm, int member);
 
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
-	percpu_counter_add(&mm->rss_stat[member], value);
+	struct percpu_counter *fbc = &mm->rss_stat[member];
+
+	if (mm_counter_in_pcpu_mode(fbc))
+		percpu_counter_add(fbc, value);
+	else
+		percpu_counter_atomic_add(fbc, value);
 
 	mm_trace_rss_stat(mm, member);
 }
 
 static inline void inc_mm_counter(struct mm_struct *mm, int member)
 {
-	percpu_counter_inc(&mm->rss_stat[member]);
-
-	mm_trace_rss_stat(mm, member);
+	add_mm_counter(mm, member, 1);
 }
 
 static inline void dec_mm_counter(struct mm_struct *mm, int member)
 {
-	percpu_counter_dec(&mm->rss_stat[member]);
+	add_mm_counter(mm, member, -1);
+}
 
-	mm_trace_rss_stat(mm, member);
+static inline s64 mm_counter_sum(struct mm_struct *mm, int member)
+{
+	struct percpu_counter *fbc = &mm->rss_stat[member];
+
+	if (mm_counter_in_pcpu_mode(fbc))
+		return percpu_counter_sum(fbc);
+
+	return percpu_counter_atomic_read(fbc);
+}
+
+static inline s64 mm_counter_sum_positive(struct mm_struct *mm, int member)
+{
+	struct percpu_counter *fbc = &mm->rss_stat[member];
+
+	if (mm_counter_in_pcpu_mode(fbc))
+		return percpu_counter_sum_positive(fbc);
+
+	return percpu_counter_atomic_read(fbc);
+}
+
+static inline int mm_counter_switch_to_pcpu(struct mm_struct *mm)
+{
+	return percpu_counter_switch_to_pcpu_many(mm->rss_stat, NR_MM_COUNTERS);
+}
+
+static inline void mm_counter_destroy(struct mm_struct *mm)
+{
+	percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);
 }
 
 /* Optimized variant when folio is already known not to be anon */
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 58688768ef0f..be39ca5af0ba 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -361,8 +361,8 @@ TRACE_EVENT(rss_stat,
 		__entry->mm_id = mm_ptr_to_hash(mm);
 		__entry->curr = !!(current->mm == mm);
 		__entry->member = member;
-		__entry->size = (percpu_counter_sum_positive(&mm->rss_stat[member])
-				 << PAGE_SHIFT);
+		__entry->size = (mm_counter_sum_positive(mm, member)
+				 << PAGE_SHIFT);
 	),
 
 	TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
diff --git a/kernel/fork.c b/kernel/fork.c
index 43c7a00bb935..dbafb407e224 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -125,6 +125,8 @@
  */
 #define MAX_THREADS FUTEX_TID_MASK
 
+DEFINE_STATIC_KEY_TRUE(mm_counter_atomic_enable);
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -853,7 +855,7 @@ static void check_mm(struct mm_struct *mm)
 			 "Please make sure 'struct resident_page_types[]' is updated as well");
 
 	for (i = 0; i < NR_MM_COUNTERS; i++) {
-		long x = percpu_counter_sum(&mm->rss_stat[i]);
+		long x = mm_counter_sum(mm, i);
 
 		if (unlikely(x))
 			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
@@ -954,7 +956,7 @@ void __mmdrop(struct mm_struct *mm)
 	put_user_ns(mm->user_ns);
 	mm_pasid_drop(mm);
 	mm_destroy_cid(mm);
-	percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);
+	mm_counter_destroy(mm);
 
 	free_mm(mm);
 }
@@ -1357,7 +1359,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	if (mm_alloc_cid(mm))
 		goto fail_cid;
 
-	if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT,
+	if (!static_branch_likely(&mm_counter_atomic_enable) &&
+	    percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT,
 				     NR_MM_COUNTERS))
 		goto fail_pcpu;
 
@@ -1783,6 +1786,17 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 	if (!oldmm)
 		return 0;
 
+	/*
+	 * For single-thread processes, rss_stat is in atomic mode, which
+	 * reduces the memory consumption and performance regression caused by
+	 * using percpu. For multiple-thread processes, rss_stat is switched to
+	 * the percpu mode to reduce the error margin.
+	 */
+	if (static_branch_likely(&mm_counter_atomic_enable) &&
+	    clone_flags & CLONE_THREAD)
+		if (mm_counter_switch_to_pcpu(oldmm))
+			return -ENOMEM;
+
 	if (clone_flags & CLONE_VM) {
 		mmget(oldmm);
 		mm = oldmm;
@@ -3623,3 +3637,11 @@ int sysctl_max_threads(struct ctl_table *table, int write,
 
 	return 0;
 }
+
+static int __init mm_counter_atomic_disable(char *buf)
+{
+	static_branch_disable(&mm_counter_atomic_enable);
+
+	return 0;
+}
+early_param("mm_counter_atomic_mode", mm_counter_atomic_disable);
--
2.25.1