
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IC9Q31

--------------------------------

Add "/proc/xcall/cpu_list" to adjust the CPU range used by kernel async
prefetch across the whole machine. The range defaults to the online
cpumask; use the following command to change it:

	echo "$cpu_list" > /proc/xcall/cpu_list

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 fs/eventpoll.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 51 insertions(+), 2 deletions(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 6322530b2c28..a60e7f7b12ef 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -778,6 +778,45 @@ DEFINE_PER_CPU_ALIGNED(unsigned long, xcall_cache_miss);
 static DEFINE_HASHTABLE(xcall_item_table, PREFETCH_ITEM_HASH_BITS);
 static DEFINE_RWLOCK(xcall_table_lock);
 static struct workqueue_struct *rc_work;
+static struct cpumask xcall_mask;
+
+static ssize_t xcall_mask_proc_write(struct file *file, const char __user *buf,
+				     size_t count, loff_t *ppos)
+{
+	struct cpumask tmp;
+	int err;
+
+	err = cpumask_parselist_user(buf, count, &tmp);
+	if (err)
+		return err;
+
+	if (!cpumask_intersects(&tmp, cpu_online_mask)) {
+		pr_warn("no online cpu in %*pbl.\n", cpumask_pr_args(&tmp));
+		return -EINVAL;
+	}
+
+	cpumask_copy(&xcall_mask, &tmp);
+	return count;
+}
+
+static int xcall_mask_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "%*pbl\n", cpumask_pr_args(&xcall_mask));
+	return 0;
+}
+
+static int xcall_mask_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, xcall_mask_proc_show, PDE_DATA(inode));
+}
+
+static const struct proc_ops xcall_mask_fops = {
+	.proc_open	= xcall_mask_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= xcall_mask_proc_write,
+};
 
 static ssize_t xcall_prefetch_write(struct file *file, const char __user *buf,
 				    size_t count, loff_t *pos)
@@ -828,7 +867,7 @@ static const struct proc_ops xcall_prefetch_fops = {
 extern bool fast_syscall_enabled(void);
 static int __init init_xcall_prefetch_procfs(void)
 {
-	struct proc_dir_entry *xcall_proc_dir, *prefetch_dir;
+	struct proc_dir_entry *xcall_proc_dir, *prefetch_dir, *xcall_mask_dir;
 
 	if (!fast_syscall_enabled())
 		return 0;
@@ -839,13 +878,19 @@ static int __init init_xcall_prefetch_procfs(void)
 	prefetch_dir = proc_create("prefetch", 0644, xcall_proc_dir, &xcall_prefetch_fops);
 	if (!prefetch_dir)
 		goto rm_xcall_proc_dir;
+	xcall_mask_dir = proc_create("cpu_list", 0644, xcall_proc_dir,
+				     &xcall_mask_fops);
+	if (!xcall_mask_dir)
+		goto rm_prefetch_dir;
+	cpumask_copy(&xcall_mask, cpu_online_mask);
 
 	return 0;
 
+rm_prefetch_dir:
+	proc_remove(prefetch_dir);
 rm_xcall_proc_dir:
 	proc_remove(xcall_proc_dir);
 	return -ENOMEM;
-
 }
 device_initcall(init_xcall_prefetch_procfs);
 
@@ -895,9 +940,13 @@ static void prefetch_work_fn(struct work_struct *work)
 static void set_prefetch_numa_cpu(struct prefetch_item *pfi, int fd)
 {
 	int cur_cpu = smp_processor_id();
+	struct cpumask tmp;
 	int cpu;
 
+	cpumask_copy(&tmp, &xcall_mask);
 	cpumask_and(&pfi->related_cpus, cpu_cpu_mask(cur_cpu), cpu_online_mask);
+	if (cpumask_intersects(&tmp, &pfi->related_cpus))
+		cpumask_and(&pfi->related_cpus, &pfi->related_cpus, &tmp);
 	cpu = cpumask_next(fd % cpumask_weight(&pfi->related_cpus),
 			   &pfi->related_cpus);
 	if (cpu > cpumask_last(&pfi->related_cpus))
-- 
2.34.1
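
For illustration, exercising the new knob from the shell could look as
follows; the CPU numbers shown are hypothetical and depend on the
machine's topology:

	# read the current prefetch cpu range (defaults to all online cpus)
	cat /proc/xcall/cpu_list
	0-7

	# restrict kernel async prefetch to cpus 0-3
	echo "0-3" > /proc/xcall/cpu_list
	cat /proc/xcall/cpu_list
	0-3

A list containing no online CPU is rejected: the write returns -EINVAL
and logs a warning, leaving the previous range in place.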