
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IC9Q31

--------------------------------

Add per-cpu cache hit/miss counts to calculate the usage efficiency of
the xcall prefetch framework.

Also add the "/proc/xcall/" dir and a "prefetch" file in it, so we can
get the xcall prefetch hit ratio on each CPU that initiates a read
system call, which is important for performance tuning:

	cat /proc/xcall/prefetch

Use "echo > /proc/xcall/prefetch" to clear the hit/miss counts. The
counters are cleared and reported for all possible CPUs so that no
stale counts survive a CPU offline/online cycle.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 fs/eventpoll.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 80 insertions(+)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 968db82175bf..5e4fc3391103 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -774,11 +774,88 @@ static void epi_rcu_free(struct rcu_head *head)
 #define XCALL_CACHE_PAGE_ORDER 2
 #define XCALL_CACHE_BUF_SIZE ((1 << XCALL_CACHE_PAGE_ORDER) * PAGE_SIZE)
 
+/* Per-cpu xcall prefetch cache hit/miss counters, updated in xcall_read(). */
+static DEFINE_PER_CPU_ALIGNED(unsigned long, xcall_cache_hit);
+static DEFINE_PER_CPU_ALIGNED(unsigned long, xcall_cache_miss);
+
 #define PREFETCH_ITEM_HASH_BITS 6
 static DEFINE_HASHTABLE(xcall_item_table, PREFETCH_ITEM_HASH_BITS);
 static DEFINE_RWLOCK(xcall_table_lock);
 static struct workqueue_struct *rc_work;
 
+/*
+ * Any write to /proc/xcall/prefetch clears the hit/miss counters on all
+ * possible CPUs; the written data itself is ignored.
+ */
+static ssize_t xcall_prefetch_write(struct file *file, const char __user *buf,
+				    size_t count, loff_t *pos)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		*per_cpu_ptr(&xcall_cache_hit, cpu) = 0;
+		*per_cpu_ptr(&xcall_cache_miss, cpu) = 0;
+	}
+
+	return count;
+}
+
+/* Print hit/miss counts and the hit ratio for each CPU that saw activity. */
+static int xcall_prefetch_show(struct seq_file *m, void *v)
+{
+	unsigned long hit = 0, miss = 0;
+	unsigned int cpu;
+	u64 percent;
+
+	for_each_possible_cpu(cpu) {
+		hit = *per_cpu_ptr(&xcall_cache_hit, cpu);
+		miss = *per_cpu_ptr(&xcall_cache_miss, cpu);
+
+		if (hit == 0 && miss == 0)
+			continue;
+
+		percent = DIV_ROUND_CLOSEST(hit * 100ULL, hit + miss);
+		seq_printf(m, "cpu%u epoll cache_{hit,miss}: %lu,%lu, hit ratio: %llu%%\n",
+			   cpu, hit, miss, percent);
+	}
+	return 0;
+}
+
+static int xcall_prefetch_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, xcall_prefetch_show, NULL);
+}
+
+static const struct proc_ops xcall_prefetch_fops = {
+	.proc_open	= xcall_prefetch_open,
+	.proc_read	= seq_read,
+	.proc_write	= xcall_prefetch_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+/* Create /proc/xcall/prefetch; only meaningful when xcall is supported. */
+static int __init init_xcall_prefetch_procfs(void)
+{
+	struct proc_dir_entry *xcall_proc_dir, *prefetch_dir;
+
+	if (!system_supports_xcall())
+		return -EACCES;
+
+	xcall_proc_dir = proc_mkdir("xcall", NULL);
+	if (!xcall_proc_dir)
+		return -ENOMEM;
+	prefetch_dir = proc_create("prefetch", 0644, xcall_proc_dir, &xcall_prefetch_fops);
+	if (!prefetch_dir)
+		goto rm_xcall_proc_dir;
+
+	return 0;
+
+rm_xcall_proc_dir:
+	proc_remove(xcall_proc_dir);
+	return -ENOMEM;
+}
+device_initcall(init_xcall_prefetch_procfs);
 
 static inline bool transition_state(struct prefetch_item *pfi,
 				    enum cache_state old, enum cache_state new)
 {
@@ -929,6 +1006,7 @@ static int xcall_read(struct prefetch_item *pfi, char __user *buf, size_t count)
 		goto slow_read;
 
 	if (copy_len == 0) {
+		this_cpu_inc(xcall_cache_hit);
 		transition_state(pfi, XCALL_CACHE_CANCEL, XCALL_CACHE_NONE);
 		return 0;
 	}
@@ -942,9 +1020,11 @@ static int xcall_read(struct prefetch_item *pfi, char __user *buf, size_t count)
 	else
 		transition_state(pfi, XCALL_CACHE_CANCEL, XCALL_CACHE_READY);
 
+	this_cpu_inc(xcall_cache_hit);
 	return copy_len;
 
 slow_read:
+	this_cpu_inc(xcall_cache_miss);
 	pfi->len = 0;
 	pfi->pos = 0;
 	cancel_work(&pfi->work);
-- 
2.34.1