
From: Yipeng Zou <zouyipeng@huawei.com>

Add cache mode for fd wakeup in epoll_pwait.

In epoll_pwait, record the start and end offsets of reads_buf for each
fd in its metadata. Then, in sys_read, read from the cache buffer and
copy to user space. This way, read data can be prefetched
asynchronously in epoll_pwait.

Signed-off-by: Yipeng Zou <zouyipeng@huawei.com>
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 fs/eventpoll.c                    | 272 +++++++++++++++++++++++++++++-
 fs/open.c                         |   4 +
 fs/proc/base.c                    |  53 +++++-
 fs/read_write.c                   |  62 ++++++-
 include/linux/fs.h                |  30 ++++
 include/linux/sched.h             |   3 +
 include/linux/sched/task.h        |   8 +
 include/linux/syscalls.h          |   7 +
 include/uapi/asm-generic/unistd.h |   1 +
 kernel/fork.c                     |  13 ++
 kernel/sysctl.c                   |  36 ++++
 11 files changed, 482 insertions(+), 7 deletions(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 5ce1ea1f452b..189b80488eca 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -128,6 +128,8 @@ struct nested_calls {
 	spinlock_t lock;
 };
 
+static struct workqueue_struct *rc_work;
+
 /*
  * Each file descriptor added to the eventpoll interface will
  * have an entry of this type linked to the "rbr" RB tree.
@@ -229,6 +231,9 @@ struct eventpoll {
	/* tracks wakeup nests for lockdep validation */
 	u8 nests;
 #endif
+
+	/* whether events need the read cache for this epoll */
+	bool is_read_cache;
 };
 
 /* Wait structure used by the poll hooks */
@@ -768,6 +773,20 @@ static void epi_rcu_free(struct rcu_head *head)
 	kmem_cache_free(epi_cache, epi);
 }
 
+#ifdef CONFIG_FAST_SYSCALL
+void free_pfi(struct file *file)
+{
+	if (file && file->pfi) {
+		if (file->pfi->cache) {
+			kfree(file->pfi->cache);
+			file->pfi->cache = NULL;
+		}
+		kfree(file->pfi);
+		file->pfi = NULL;
+	}
+}
+#endif
+
 /*
  * Removes a "struct epitem" from the eventpoll RB tree and deallocates
  * all the associated resources. Must be called with "mtx" held.
@@ -783,6 +802,17 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 	 */
 	ep_unregister_pollwait(ep, epi);
 
+	/* Remove the prefetch item */
+#ifdef CONFIG_FAST_SYSCALL
+	if (current->xcall_select &&
+	    test_bit(__NR_epoll_pwait, current->xcall_select) && file->pfi) {
+		spin_lock(&file->pfi->pfi_lock);
+		file->pfi->keep_running = false;
+		spin_unlock(&file->pfi->pfi_lock);
+		cancel_work_sync(&file->pfi->work);
+	}
+#endif
+
 	/* Remove the current item from the list of epoll hooks */
 	spin_lock(&file->f_lock);
 	list_del_rcu(&epi->fllink);
@@ -1191,6 +1221,202 @@ static inline bool chain_epi_lockless(struct epitem *epi)
 	return true;
 }
 
+int max_fd_cache_pages = 1;
+static void do_prefetch_item(struct prefetch_item *pfi)
+{
+	if (pfi && (pfi->state != EPOLL_FILE_CACHE_QUEUED))
+		return;
+
+	if (pfi->len > 0)
+		return;
+
+	pfi->len = kernel_read(pfi->f, pfi->cache,
+			max_fd_cache_pages * PAGE_SIZE, &pfi->f->f_pos);
+	pfi->state = EPOLL_FILE_CACHE_READY;
+	pfi->keep_running = false;
+}
+
+#ifdef CONFIG_FAST_SYSCALL
+struct cpumask xcall_numa_cpumask[4] __read_mostly;
+unsigned long *xcall_numa_cpumask_bits0 = cpumask_bits(&xcall_numa_cpumask[0]);
+unsigned long *xcall_numa_cpumask_bits1 = cpumask_bits(&xcall_numa_cpumask[1]);
+unsigned long *xcall_numa_cpumask_bits2 = cpumask_bits(&xcall_numa_cpumask[2]);
+unsigned long *xcall_numa_cpumask_bits3 = cpumask_bits(&xcall_numa_cpumask[3]);
+
+#ifdef CONFIG_SYSCTL
+static void proc_xcall_update(void)
+{
+	int i;
+
+	/* Remove impossible CPUs to keep the sysctl output clean. */
+	for (i = 0; i < 4; i++)
+		cpumask_and(&xcall_numa_cpumask[i], &xcall_numa_cpumask[i], cpu_possible_mask);
+}
+
+int proc_xcall_numa_cpumask(struct ctl_table *table, int write,
+		void *buffer, size_t *lenp, loff_t *ppos)
+{
+	int err;
+
+	/* TODO: add locking */
+	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
+	if (!err && write)
+		proc_xcall_update();
+
+	return err;
+}
+#endif /* CONFIG_SYSCTL */
+
+static void prefetch_work_fn(struct work_struct *work)
+{
+	struct prefetch_item *pfi;
+
+	pfi = container_of(work, struct prefetch_item, work);
+
+retry:
+	spin_lock(&pfi->pfi_lock);
+	do_prefetch_item(pfi);
+	spin_unlock(&pfi->pfi_lock);
+
+	/* Don't release the CPU, so the item is handled as soon as possible */
+	if (pfi->keep_running) {
+		schedule();
+		goto retry;
+	}
+
+	return;
+}
+#endif
+
+void rc_prefetch_free(struct read_cache_entry *rc, bool force)
+{
+	if (!rc)
+		return;
+
+	/* Only free rc in free_task() */
+	if (force == false)
+		return;
+
+	kfree(rc);
+	return;
+}
+
+struct read_cache_entry *rc_prefetch_alloc(struct task_struct *tsk)
+{
+	struct read_cache_entry *rc = tsk->rc;
+
+	if (!rc) {
+		rc = kmalloc(sizeof(struct read_cache_entry), GFP_KERNEL);
+		if (!rc)
+			return NULL;
+	}
+
+	rc->cache_hit = 0;
+	rc->cache_miss = 0;
+	rc->cache_queued = 0;
+	rc->cache_wait = 0;
+	/* Default to sync mode */
+	rc->sync_mode = 1;
+
+	return rc;
+}
+
+#ifdef CONFIG_FAST_SYSCALL
+static int get_nth_cpu_in_cpumask(const struct cpumask *mask, int n)
+{
+	int count = 0;
+	int cpu;
+
+	for_each_cpu(cpu, mask) {
+		if (count == n)
+			return cpu;
+		count++;
+	}
+
+	return cpumask_first(mask);
+}
+
+static int alloc_pfi(struct epitem *epi)
+{
+	struct file *tfile = epi->ffd.file;
+	int fd = epi->ffd.fd;
+	struct prefetch_item *pfi;
+	int cpu, nid;
+
+	if (!current->xcall_select ||
+	    !test_bit(__NR_epoll_pwait, current->xcall_select)) {
+		tfile->pfi = NULL;
+		return -EINVAL;
+	}
+
+	/* Initialize the prefetch item */
+	pfi = kmalloc(sizeof(struct prefetch_item), GFP_KERNEL);
+	if (!pfi)
+		return -ENOMEM;
+
+	pfi->cache = kzalloc(max_fd_cache_pages * PAGE_SIZE, GFP_KERNEL);
+	if (!pfi->cache) {
+		kfree(pfi);
+		return -ENOMEM;
+	}
+
+	/* Init read cache mode */
+	pfi->state = EPOLL_FILE_CACHE_NONE;
+	INIT_WORK(&pfi->work, prefetch_work_fn);
+	pfi->keep_running = false;
+	pfi->rc = current->rc;
+	pfi->fd = fd;
+	pfi->f = tfile;
+	pfi->len = 0;
+	pfi->pos = 0;
+	cpu = smp_processor_id();
+	nid = numa_node_id();
+	cpumask_and(&pfi->related_cpus, cpu_cpu_mask(cpu), cpu_online_mask);
+	if (nid <= 3 && !cpumask_empty(&xcall_numa_cpumask[nid]) &&
+	    cpumask_subset(&xcall_numa_cpumask[nid], cpu_cpu_mask(cpu)))
+		cpumask_and(&pfi->related_cpus, &pfi->related_cpus, &xcall_numa_cpumask[nid]);
+	pfi->cpu = get_nth_cpu_in_cpumask(&pfi->related_cpus, fd % cpumask_weight(&pfi->related_cpus));
+
+	tfile->pfi = pfi;
+	spin_lock_init(&tfile->pfi->pfi_lock);
+
+	return 0;
+}
+#endif
+
+static void ep_prefetch_item_enqueue(struct eventpoll *ep, struct epitem *epi)
+{
+	struct prefetch_item *pfi = epi->ffd.file->pfi;
+	int t_cpu;
+
+	if (!pfi) {
+		if (alloc_pfi(epi))
+			return;
+		pfi = epi->ffd.file->pfi;
+	}
+
+	if (!ep->is_read_cache || !pfi->rc || !pfi->cache ||
+	    !(epi->event.events & EPOLLIN) ||
+	    pfi->state != EPOLL_FILE_CACHE_NONE)
+		return;
+
+	if (pfi->cpu == smp_processor_id()) {
+		t_cpu = cpumask_next(pfi->cpu, &pfi->related_cpus);
+		if (t_cpu > cpumask_last(&pfi->related_cpus))
+			t_cpu = cpumask_first(&pfi->related_cpus);
+	} else
+		t_cpu = pfi->cpu;
+
+	spin_lock(&pfi->pfi_lock);
+	pfi->state = EPOLL_FILE_CACHE_QUEUED;
+	pfi->rc->cache_queued++;
+	if (pfi->rc->sync_mode)
+		do_prefetch_item(pfi);
+	else
+		queue_work_on(t_cpu, rc_work, &pfi->work);
+	spin_unlock(&pfi->pfi_lock);
+}
+
 /*
  * This is the callback that is passed to the wait queue wakeup
  * mechanism. It is called by the stored file descriptors when they
@@ -1751,6 +1977,8 @@ static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head
 		if (!revents)
 			continue;
 
+		ep_prefetch_item_enqueue(ep, epi);
+
 		if (__put_user(revents, &uevent->events) ||
 		    __put_user(epi->event.data, &uevent->data)) {
 			list_add(&epi->rdllink, head);
@@ -2327,7 +2555,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
  * part of the user space epoll_wait(2).
  */
 static int do_epoll_wait(int epfd, struct epoll_event __user *events,
-			 int maxevents, int timeout)
+			 int maxevents, int timeout, bool read_cache_mode)
 {
 	int error;
 	struct fd f;
@@ -2360,9 +2588,17 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events,
 	 */
 	ep = f.file->private_data;
 
+	if (read_cache_mode)
+		ep->is_read_cache = true;
+	else
+		ep->is_read_cache = false;
+
 	/* Time to fish for events ... */
 	error = ep_poll(ep, events, maxevents, timeout);
 
+	/* Always reset the epoll read cache mode to false */
+	ep->is_read_cache = false;
+
 error_fput:
 	fdput(f);
 	return error;
@@ -2371,7 +2607,7 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events,
 SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
 		int, maxevents, int, timeout)
 {
-	return do_epoll_wait(epfd, events, maxevents, timeout);
+	return do_epoll_wait(epfd, events, maxevents, timeout, false);
 }
 
 /*
@@ -2383,6 +2619,26 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
 		size_t, sigsetsize)
 {
 	int error;
+	/*
+	 * If the caller wants a certain signal mask to be set during the wait,
+	 * we apply it here.
+	 */
+	error = set_user_sigmask(sigmask, sigsetsize);
+	if (error)
+		return error;
+
+	error = do_epoll_wait(epfd, events, maxevents, timeout, false);
+	restore_saved_sigmask_unless(error == -EINTR);
+
+	return error;
+}
+
+#ifdef CONFIG_FAST_SYSCALL
+XCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
+		int, maxevents, int, timeout, const sigset_t __user *, sigmask,
+		size_t, sigsetsize)
+{
+	int error;
 
 	/*
 	 * If the caller wants a certain signal mask to be set during the wait,
@@ -2392,11 +2648,15 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
 	if (error)
 		return error;
 
-	error = do_epoll_wait(epfd, events, maxevents, timeout);
+	if (current->rc)
+		error = do_epoll_wait(epfd, events, maxevents, timeout, true);
+	else
+		error = do_epoll_wait(epfd, events, maxevents, timeout, false);
 	restore_saved_sigmask_unless(error == -EINTR);
 
 	return error;
 }
+#endif
 
 #ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
@@ -2415,7 +2675,7 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
 	if (err)
 		return err;
 
-	err = do_epoll_wait(epfd, events, maxevents, timeout);
+	err = do_epoll_wait(epfd, events, maxevents, timeout, false);
 	restore_saved_sigmask_unless(err == -EINTR);
 
 	return err;
@@ -2454,6 +2714,10 @@ static int __init eventpoll_init(void)
 	pwq_cache = kmem_cache_create("eventpoll_pwq",
 		sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
 
+	rc_work = alloc_workqueue("eventpoll_rc", 0, 0);
+	if (!rc_work)
+		return -ENOMEM;
+
 	return 0;
 }
 fs_initcall(eventpoll_init);
diff --git a/fs/open.c b/fs/open.c
index 96de0d3f1a8b..19a8af5c5942 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -1287,6 +1287,10 @@ int filp_close(struct file *filp, fl_owner_t id)
 		return 0;
 	}
 
+#ifdef CONFIG_FAST_SYSCALL
+	free_pfi(filp);
+#endif
+
 	if (filp->f_op->flush)
 		retval = filp->f_op->flush(filp, id);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 4c6fdda92fa4..668309aa6d1d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3630,6 +3630,15 @@ static int xcall_show(struct seq_file *m, void *v)
 		else
 			seq_printf(m, "xcall_select: NULL\n");
 	}
+
+	if (p->xcall_select && test_bit(__NR_epoll_pwait, p->xcall_select) && p->rc) {
+		seq_printf(m, "epoll read cache mode: %s\n", p->rc->sync_mode ? "SYNC" : "ASYNC");
+		seq_printf(m, "epoll cache_{hit,miss,queued,wait}: %ld,%ld,%ld,%ld\n", p->rc->cache_hit,
+			   p->rc->cache_miss,
+			   p->rc->cache_queued,
+			   p->rc->cache_wait);
+	}
+
 out:
 	put_task_struct(p);
@@ -3656,6 +3665,8 @@ static int xcall_enable_one(struct task_struct *p, unsigned int sc_no)
 	return 0;
 }
 
+void rc_prefetch_free(struct read_cache_entry *rc, bool force);
+
 static int xcall_disable_one(struct task_struct *p, unsigned int sc_no)
 {
 	bitmap_clear(p->xcall_enable, sc_no, 1);
@@ -3667,13 +3678,47 @@ static int xcall_disable_one(struct task_struct *p, unsigned int sc_no)
 		bitmap_free(p->xcall_select);
 		p->xcall_select = NULL;
 	}
+
+	// sc_no: 22 is sys_epoll_pwait
+	// sc_no: 63 is sys_read
+	if (sc_no == __NR_epoll_pwait)
+		rc_prefetch_free(p->rc, false);
+
 	return 0;
 }
 
+struct read_cache_entry *rc_prefetch_alloc(struct task_struct *tsk);
+
 static int xcall_select_table(struct task_struct *p, unsigned int sc_no)
 {
 	BUG_ON(!p->xcall_select);
 	test_and_change_bit(sc_no, p->xcall_select);
+
+	// sc_no: 22 is sys_epoll_pwait
+	// sc_no: 63 is sys_read
+	if (sc_no == __NR_epoll_pwait) {
+		if (test_bit(sc_no, p->xcall_select))
+			p->rc = rc_prefetch_alloc(p);
+		else
+			rc_prefetch_free(p->rc, false);
+	}
+
+	return 0;
+}
+
+static int xcall_config_one(struct task_struct *p, unsigned int sc_no)
+{
+	/* Only configure when selected */
+	if (!p->xcall_select || !test_bit(sc_no, p->xcall_select))
+		return 0;
+
+	// sc_no: 22 is sys_epoll_pwait
+	// sc_no: 63 is sys_read
+	if (sc_no == __NR_epoll_pwait && p->rc) {
+		if (p->rc->sync_mode)
+			p->rc->sync_mode = 0;
+		else
+			p->rc->sync_mode = 1;
+	}
 	return 0;
 }
@@ -3686,7 +3731,7 @@ static ssize_t xcall_write(struct file *file, const char __user *buf,
 	const size_t maxlen = sizeof(buffer) - 1;
 	unsigned int sc_no = __NR_syscalls;
 	int ret = 0;
-	int is_clear = 0, is_switch = 0;
+	int is_clear = 0, is_switch = 0, is_config = 0;
 
 	if (!fast_syscall_enabled())
 		return -EACCES;
@@ -3703,8 +3748,10 @@ static ssize_t xcall_write(struct file *file, const char __user *buf,
 		is_clear = 1;
 	else if ((buffer[0] == '@'))
 		is_switch = 1;
+	else if ((buffer[0] == '~'))
+		is_config = 1;
 
-	if (kstrtouint(buffer + is_clear + is_switch, 10, &sc_no)) {
+	if (kstrtouint(buffer + is_clear + is_switch + is_config, 10, &sc_no)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -3720,6 +3767,8 @@ static ssize_t xcall_write(struct file *file, const char __user *buf,
 		ret = xcall_enable_one(p, sc_no);
 	else if (!is_switch && is_clear && test_bit(sc_no, p->xcall_enable))
 		ret = xcall_disable_one(p, sc_no);
+	else if (is_config && test_bit(sc_no, p->xcall_enable))
+		ret = xcall_config_one(p, sc_no);
 	else
 		ret = -EINVAL;
diff --git a/fs/read_write.c b/fs/read_write.c
index da03b3e65cf3..119c2788fac8 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -621,9 +621,69 @@ ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
 {
 	struct fd f = fdget_pos(fd);
 	ssize_t ret = -EBADF;
+	loff_t pos, *ppos;
 
 	if (f.file) {
-		loff_t pos, *ppos = file_ppos(f.file);
+#ifdef CONFIG_FAST_SYSCALL
+		if (current->xcall_select && test_bit(__NR_epoll_pwait, current->xcall_select) &&
+		    f.file->pfi && f.file->pfi->cache) {
+			struct prefetch_item *pfi = f.file->pfi;
+			ssize_t copy_len;
+
+			if (!spin_trylock(&pfi->pfi_lock)) {
+				if (current->rc)
+					current->rc->cache_wait++;
+				spin_lock(&pfi->pfi_lock);
+			}
+
+			copy_len = pfi->len;
+			if ((pfi->state == EPOLL_FILE_CACHE_READY) &&
+			    copy_len >= 0) {
+				ssize_t copy_ret = -1;
+
+				if (copy_len == 0)
+					copy_ret = 0;
+
+				if (copy_len > 0) {
+					if (copy_len >= count)
+						copy_len = count;
+
+					copy_ret = copy_to_user(buf, (void *)(pfi->cache + pfi->pos), copy_len);
+					pfi->len -= copy_len;
+					if (pfi->len <= 0) {
+						pfi->len = 0;
+						pfi->state = EPOLL_FILE_CACHE_NONE;
+					}
+
+					pfi->pos += count;
+					if (pfi->pos >= (max_fd_cache_pages * PAGE_SIZE) || pfi->len == 0)
+						pfi->pos = 0;
+				}
+
+				if (current->rc)
+					current->rc->cache_hit++;
+				fdput_pos(f);
+				spin_unlock(&pfi->pfi_lock);
+
+				if (copy_ret == 0)
+					return copy_len;
+				else
+					return -EBADF;
+			}
+
+			/* Always reset the cache state to none */
+			pfi->len = 0;
+			pfi->state = EPOLL_FILE_CACHE_NONE;
+			if (current->rc)
+				current->rc->cache_miss++;
+			cancel_work(&pfi->work);
+			spin_unlock(&pfi->pfi_lock);
+
+			if (copy_len < 0)
+				return copy_len;
+		}
+#endif
+
+		ppos = file_ppos(f.file);
 		if (ppos) {
 			pos = *ppos;
 			ppos = &pos;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a0ea6b64c45d..ff89f615caa4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -947,6 +947,29 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
 		index <  ra->start + ra->size);
 }
 
+#define EPOLL_FILE_CACHE_NONE	0
+#define EPOLL_FILE_CACHE_QUEUED	1
+#define EPOLL_FILE_CACHE_READY	2
+
+struct prefetch_item {
+	struct file *f;
+	int fd;
+	struct read_cache_entry *rc;
+	bool keep_running;
+	struct work_struct work;
+	int cpu;
+	cpumask_t related_cpus;
+	char *cache;
+	ssize_t len;
+	/* cache state in epoll_wait */
+	int state;
+	spinlock_t pfi_lock;
+	loff_t pos;
+};
+
+#define MAX_FD_CACHE 1024
+extern int max_fd_cache_pages;
+
 struct file {
 	union {
 		struct llist_node	fu_llist;
@@ -994,6 +1017,9 @@ struct file {
 #else
 	KABI_RESERVE(1)
 #endif
+
+	/* cache state in epoll_wait */
+	struct prefetch_item *pfi;
 } __randomize_layout
   __attribute__((aligned(4)));	/* lest something weird decides that 2 is OK */
@@ -3750,4 +3776,8 @@ static inline bool cachefiles_ondemand_is_enabled(void)
 }
 #endif
 
+#ifdef CONFIG_FAST_SYSCALL
+void free_pfi(struct file *file);
+#endif
+
 #endif /* _LINUX_FS_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a377bae2064e..fe9a75379b0b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1484,6 +1484,9 @@ struct task_struct {
 	KABI_RESERVE(15)
 	KABI_RESERVE(16)
 #endif
+
+	struct read_cache_entry *rc;
+
 	KABI_AUX_PTR(task_struct)
 
 	/* CPU-specific state of this task: */
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 7d18b2bb9ad5..24087b13607b 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -10,6 +10,14 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
+struct read_cache_entry {
+	unsigned long sync_mode;
+	unsigned long cache_hit;
+	unsigned long cache_miss;
+	unsigned long cache_queued;
+	unsigned long cache_wait;
+};
+
 struct task_struct;
 struct rusage;
 union thread_union;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0e379bcd8194..2527c32adad1 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -236,6 +236,13 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
 
 #define XCALL_DEFINEx(x, sname, ...)	\
 	__XCALL_DEFINEx(x, sname, __VA_ARGS__)
+
+extern unsigned long *xcall_numa_cpumask_bits0;
+extern unsigned long *xcall_numa_cpumask_bits1;
+extern unsigned long *xcall_numa_cpumask_bits2;
+extern unsigned long *xcall_numa_cpumask_bits3;
+int proc_xcall_numa_cpumask(struct ctl_table *table, int write,
+		void *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
 #define __PROTECT(...) asmlinkage_protect(__VA_ARGS__)
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 9b38861d9ea8..41ed441c3c3a 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -98,6 +98,7 @@ __SYSCALL(__NR_epoll_create1, sys_epoll_create1)
 __SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
 #define __NR_epoll_pwait 22
 __SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
+__XCALL_SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
 
 /* fs/fcntl.c */
 #define __NR_dup 23
diff --git a/kernel/fork.c b/kernel/fork.c
index b884ac9cdece..cae4dee8df46 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -438,6 +438,8 @@ static void release_task_stack(struct task_struct *tsk)
 #endif
 }
 
+void rc_prefetch_free(struct read_cache_entry *rc, bool force);
+
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 void put_task_stack(struct task_struct *tsk)
 {
@@ -486,6 +488,11 @@ void free_task(struct task_struct *tsk)
 
 	if (tsk->xcall_select)
 		bitmap_free(tsk->xcall_select);
+
+	if (tsk->rc) {
+		rc_prefetch_free(tsk->rc, true);
+		tsk->rc = NULL;
+	}
 #endif
 
 	free_task_struct(tsk);
@@ -1020,6 +1027,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #ifdef CONFIG_FAST_SYSCALL
 	tsk->xcall_enable = NULL;
 	tsk->xcall_select = NULL;
+	tsk->rc = NULL;
 #endif
 
 	return tsk;
@@ -1968,6 +1976,8 @@ static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
 	mutex_unlock(&oom_adj_mutex);
 }
 
+struct read_cache_entry *rc_prefetch_alloc(struct task_struct *tsk);
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -2115,6 +2125,9 @@ static __latent_entropy struct task_struct *copy_process(
 		bitmap_copy(p->xcall_select, current->xcall_select,
 			    __NR_syscalls);
 	}
+
+	if (current->rc)
+		p->rc = rc_prefetch_alloc(p);
 #endif
 
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b4b36f8a3149..02b55955b725 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2861,6 +2861,42 @@ static struct ctl_table kern_table[] = {
 		.extra1		= SYSCTL_ZERO,
 		.extra2		= &hundred_thousand,
 	},
+#endif
+#ifdef CONFIG_FAST_SYSCALL
+	{
+		.procname	= "xcall_numa0_cpumask",
+		.data		= &xcall_numa_cpumask_bits0,
+		.maxlen		= NR_CPUS,
+		.mode		= 0644,
+		.proc_handler	= proc_xcall_numa_cpumask,
+	},
+	{
+		.procname	= "xcall_numa1_cpumask",
+		.data		= &xcall_numa_cpumask_bits1,
+		.maxlen		= NR_CPUS,
+		.mode		= 0644,
+		.proc_handler	= proc_xcall_numa_cpumask,
+	},
+	{
+		.procname	= "xcall_numa2_cpumask",
+		.data		= &xcall_numa_cpumask_bits2,
+		.maxlen		= NR_CPUS,
+		.mode		= 0644,
+		.proc_handler	= proc_xcall_numa_cpumask,
+	},
+	{
+		.procname	= "xcall_numa3_cpumask",
+		.data		= &xcall_numa_cpumask_bits3,
+		.maxlen		= NR_CPUS,
+		.mode		= 0644,
+		.proc_handler	= proc_xcall_numa_cpumask,
+	},
+	{ .procname	= "max_xcall_cache_pages",
+		.data		= &max_fd_cache_pages,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+	},
 #endif
 	{ }
 };
-- 
2.34.1
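
Usage sketch (editorial addition, not part of the patch): per the
xcall_write() changes in fs/proc/base.c, /proc/<pid>/xcall takes a
syscall number with an optional prefix -- a bare "22" enables xcall for
epoll_pwait, "!22" clears it, "@22" toggles selection of the prefetch
path (allocating the per-task read_cache_entry), and the new "~22"
toggles sync_mode. The snippet below drives that interface from
userspace; the helper name and the hard-coded number 22
(__NR_epoll_pwait on this ABI) are illustrative only.

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>

	/* Write one command string to /proc/<pid>/xcall. */
	static void xcall_cmd(pid_t pid, const char *cmd)
	{
		char path[64];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/%d/xcall", (int)pid);
		f = fopen(path, "w");
		if (!f) {
			perror(path);
			exit(1);
		}
		fputs(cmd, f);
		fclose(f);
	}

	int main(int argc, char **argv)
	{
		pid_t pid;

		if (argc < 2)
			return 1;
		pid = (pid_t)atoi(argv[1]);

		xcall_cmd(pid, "22");	/* enable xcall for epoll_pwait */
		xcall_cmd(pid, "@22");	/* select the read-cache prefetch path */
		xcall_cmd(pid, "~22");	/* toggle SYNC/ASYNC prefetch mode */
		/* "cat /proc/<pid>/xcall" now shows the mode and the
		 * cache_{hit,miss,queued,wait} counters from xcall_show() */
		return 0;
	}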
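For reference, a minimal sketch of the event loop this patch targets:
epoll_pwait() reports EPOLLIN and, when the task is selected as above,
enqueues the prefetch via ep_prefetch_item_enqueue(), so the following
read() can be served from the per-file cache filled by
do_prefetch_item(). The buffer size and error handling here are
illustrative, not prescriptive.

	#include <stddef.h>
	#include <sys/epoll.h>
	#include <unistd.h>

	int event_loop(int epfd, char *buf, size_t buflen)
	{
		struct epoll_event ev[16];
		ssize_t len;
		int i, n;

		for (;;) {
			/* prefetch is enqueued for ready EPOLLIN fds here */
			n = epoll_pwait(epfd, ev, 16, -1, NULL);
			if (n < 0)
				return -1;

			for (i = 0; i < n; i++) {
				if (!(ev[i].events & EPOLLIN))
					continue;
				/* a cache hit copies straight from pfi->cache */
				len = read(ev[i].data.fd, buf, buflen);
				if (len <= 0)
					continue;
				/* process len bytes ... */
			}
		}
	}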