[PATCH OLK-6.6 v6 0/5] bugfix for xcall2.0
Xinyu Zheng (5):
  xcall2.0: prefetch: fix variable name typos in __do_sys_epoll_pwait
  xcall2.0: prefetch: keep prefetch module name the same as the file name
  xcall2.0: add xcall_subdir_create helper to create a subdir below /proc/xcall
  xcall2.0: Fix mem leak in proc_xcall_command
  xcall2.0: prefetch: introduce struct prefetch_mm_data

 arch/arm64/include/asm/xcall.h   |   4 +-
 arch/arm64/kernel/xcall/core.c   |  25 ++++-
 arch/arm64/kernel/xcall/proc.c   |  61 ++----------
 drivers/staging/xcall/prefetch.c | 156 +++++++++++++++++++++----------
 include/linux/xcall.h            |   1 +
 5 files changed, 140 insertions(+), 107 deletions(-)

-- 
2.34.1
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/release-management/issues/ID5CMS

--------------------------------

Fix a variable name typo in __do_sys_epoll_pwait(): rename
prefech_task_num to prefetch_task_num.

Fixes: ccb8cd0c6313 ("xcall2.0: Introduce xcall epollwait prefetch feature")
Signed-off-by: Xinyu Zheng <zhengxinyu6@huawei.com>
---
 drivers/staging/xcall/prefetch.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/staging/xcall/prefetch.c b/drivers/staging/xcall/prefetch.c
index 923ca2a23c98..649ababa1aa0 100644
--- a/drivers/staging/xcall/prefetch.c
+++ b/drivers/staging/xcall/prefetch.c
@@ -400,7 +400,7 @@ static long __do_sys_epoll_pwait(struct pt_regs *regs)
         void __user *buf = (void *)regs->regs[1];
         struct prefetch_item *pfi = NULL;
         struct epoll_event events[MAX_FD] = {0};
-        int i, fd, cpu, prefech_task_num;
+        int i, fd, cpu, prefetch_task_num;
         long ret;
 
         ret = default_sys_call_table()[__NR_epoll_pwait](regs);
@@ -410,11 +410,11 @@ static long __do_sys_epoll_pwait(struct pt_regs *regs)
         if (!current_prefetch_items())
                 return ret;
 
-        prefech_task_num = ret > MAX_FD ? MAX_FD : ret;
-        if (copy_from_user(events, buf, prefech_task_num * sizeof(struct epoll_event)))
+        prefetch_task_num = ret > MAX_FD ? MAX_FD : ret;
+        if (copy_from_user(events, buf, prefetch_task_num * sizeof(struct epoll_event)))
                 return ret;
 
-        for (i = 0; i < prefech_task_num; i++) {
+        for (i = 0; i < prefetch_task_num; i++) {
                 fd = events[i].data;
                 if (!(events[i].events & EPOLLIN) || fd >= MAX_FD)
                         continue;
-- 
2.34.1
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/release-management/issues/ID5CMS

--------------------------------

Keep the xcall program name the same as the kernel module file name.
This makes it easier for users to register xcall to this module through
/proc/xcall/comm; otherwise they have to double-check the module name in
the source file.

Fixes: ccb8cd0c6313 ("xcall2.0: Introduce xcall epollwait prefetch feature")
Signed-off-by: Xinyu Zheng <zhengxinyu6@huawei.com>
---
 drivers/staging/xcall/prefetch.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/staging/xcall/prefetch.c b/drivers/staging/xcall/prefetch.c
index 649ababa1aa0..f096c314cf4d 100644
--- a/drivers/staging/xcall/prefetch.c
+++ b/drivers/staging/xcall/prefetch.c
@@ -474,7 +474,7 @@ static long __do_sys_read(struct pt_regs *regs)
 
 /* MANDATORY */
 struct xcall_prog xcall_prefetch_prog = {
-        .name = "xcall_prefetch",
+        .name = "prefetch",
         .owner = THIS_MODULE,
         .objs = {
                 {
@@ -572,6 +572,4 @@ static void __exit xcall_prefetch_exit(void)
 module_init(xcall_prefetch_init);
 module_exit(xcall_prefetch_exit);
 
-MODULE_AUTHOR("");
-MODULE_DESCRIPTION("Xcall Prefetch");
 MODULE_LICENSE("GPL");
-- 
2.34.1
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/release-management/issues/ID5CMS

--------------------------------

Export the xcall_subdir_create() helper to kernel modules so that each
module can create its private procfs entries below /proc/xcall, the
root directory for xcall.

Fixes: ccb8cd0c6313 ("xcall2.0: Introduce xcall epollwait prefetch feature")
Signed-off-by: Xinyu Zheng <zhengxinyu6@huawei.com>
---
 arch/arm64/kernel/xcall/proc.c   | 10 ++++++++--
 drivers/staging/xcall/prefetch.c |  2 +-
 include/linux/xcall.h            |  1 +
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kernel/xcall/proc.c b/arch/arm64/kernel/xcall/proc.c
index 12032120c7d6..beb2c66dfc47 100644
--- a/arch/arm64/kernel/xcall/proc.c
+++ b/arch/arm64/kernel/xcall/proc.c
@@ -16,6 +16,8 @@
 static LIST_HEAD(comm_list);
 static DECLARE_RWSEM(comm_rwsem);
 
+static struct proc_dir_entry *root_xcall_dir;
+
 static void free_xcall_comm(struct xcall_comm *info)
 {
         if (!info)
@@ -213,6 +215,12 @@ static ssize_t xcall_comm_write(struct file *file,
         return ret ? ret : nbytes;
 }
 
+struct proc_dir_entry *xcall_subdir_create(const char *name)
+{
+        return proc_mkdir(name, root_xcall_dir);
+}
+EXPORT_SYMBOL(xcall_subdir_create);
+
 static const struct proc_ops xcall_comm_ops = {
         .proc_open = xcall_comm_open,
         .proc_read = seq_read,
@@ -223,8 +231,6 @@ static const struct proc_ops xcall_comm_ops = {
 
 static int __init xcall_proc_init(void)
 {
-        struct proc_dir_entry *root_xcall_dir;
-
         if (!static_key_enabled(&xcall_enable))
                 return 0;
 
diff --git a/drivers/staging/xcall/prefetch.c b/drivers/staging/xcall/prefetch.c
index f096c314cf4d..7e033a770f14 100644
--- a/drivers/staging/xcall/prefetch.c
+++ b/drivers/staging/xcall/prefetch.c
@@ -503,7 +503,7 @@ struct xcall_prog xcall_prefetch_prog = {
 
 static int __init init_xcall_prefetch_procfs(void)
 {
-        xcall_proc_dir = proc_mkdir("xcall_feature", NULL);
+        xcall_proc_dir = xcall_subdir_create("prefetch");
         if (!xcall_proc_dir)
                 return -ENOMEM;
         prefetch_dir = proc_create("prefetch", 0640, xcall_proc_dir,
diff --git a/include/linux/xcall.h b/include/linux/xcall.h
index 510aebe4e7c0..26c60cd3dd38 100644
--- a/include/linux/xcall.h
+++ b/include/linux/xcall.h
@@ -32,6 +32,7 @@ struct xcall_prog {
 extern int xcall_prog_register(struct xcall_prog *prog);
 extern void xcall_prog_unregister(struct xcall_prog *prog);
 extern void mm_init_xcall_area(struct mm_struct *mm, struct task_struct *p);
+extern struct proc_dir_entry *xcall_subdir_create(const char *name);
 extern void clear_xcall_area(struct mm_struct *mm);
 extern int xcall_mmap(struct vm_area_struct *vma, struct mm_struct *mm);
 #else /* !CONFIG_DYNAMIC_XCALL */
-- 
2.34.1
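[Editor's note] For reference, a minimal sketch of how an out-of-tree xcall
feature module might use the exported helper. The "demo" directory name, the
"stats" file and the trivial show callback are hypothetical and only
illustrate the intended usage, mirroring what prefetch.c does with
xcall_subdir_create("prefetch"):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/xcall.h>

/* Hypothetical per-feature directory under /proc/xcall. */
static struct proc_dir_entry *demo_dir;

static int demo_show(struct seq_file *m, void *v)
{
        seq_puts(m, "demo feature loaded\n");
        return 0;
}

static int __init demo_init(void)
{
        /* Creates /proc/xcall/demo instead of a top-level /proc entry. */
        demo_dir = xcall_subdir_create("demo");
        if (!demo_dir)
                return -ENOMEM;

        if (!proc_create_single("stats", 0444, demo_dir, demo_show)) {
                proc_remove(demo_dir);
                return -ENOMEM;
        }
        return 0;
}

static void __exit demo_exit(void)
{
        proc_remove(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Keeping every xcall feature under the common /proc/xcall root avoids each
module inventing its own top-level /proc directory.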
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/release-management/issues/ID5CMS

--------------------------------

A test registers and unregisters xcall through /proc/xcall/comm from
multiple threads. kmemleak then reported a leak:

unreferenced object 0xffff00104bd86d00 (size 64):
  comm "bash", pid 4246, jiffies 4295025941 (age 1701.080s)
  hex dump (first 32 bytes):
    60 fd ff 4b 10 00 ff ff 80 6f d8 4b 10 00 ff ff  `..K.....o.K....
    a0 df 28 04 90 00 ff ff 18 41 aa 04 88 00 ff ff  ..(......A......
  backtrace:
    kmemleak_alloc+0xb4/0xd0
    __kmem_cache_alloc_node+0x330/0x420
    kmalloc_trace+0x48/0xf8
    proc_xcall_command+0x44/0x378
    xcall_comm_write+0xc8/0x160
    proc_reg_write+0x110/0x180
    vfs_write+0x150/0x3a8
    ksys_write+0xd0/0x180
    __arm64_sys_write+0x4c/0x68
    invoke_syscall+0x68/0x1a0
    el0_svc_common.constprop.0+0x11c/0x150
    do_el0_svc+0x68/0xb0
    el0_slow_syscall+0x44/0x1e8
    .slow_syscall+0x18c/0x190

In the interleaving below, we cannot guarantee that comm_list and
xcalls_list stay consistent, which is how the leak happens. It also
causes another bug: an xcall that has already been unregistered can
still be seen in /proc/xcall/comm.

-------------------------------------------------------
      A thread                      B thread
-------------------------------------------------------
xcall_attach
  insert_xcall_locked
                                 xcall_detach
                                   delete_xcall_comm_locked
  insert_xcall_comm_locked
-------------------------------------------------------

To solve this, store the struct xcall_comm inside struct xcall and
protect both with the same lock, so the two structures always stay
consistent.

Fixes: 7d904de7feb2 ("xcall2.0: Add userspace proc interface")
Signed-off-by: Xinyu Zheng <zhengxinyu6@huawei.com>
---
 arch/arm64/include/asm/xcall.h |  4 ++-
 arch/arm64/kernel/xcall/core.c | 25 +++++++++++++---
 arch/arm64/kernel/xcall/proc.c | 53 ++--------------------------------
 3 files changed, 27 insertions(+), 55 deletions(-)

diff --git a/arch/arm64/include/asm/xcall.h b/arch/arm64/include/asm/xcall.h
index c9143b7d2096..0d0c4c794010 100644
--- a/arch/arm64/include/asm/xcall.h
+++ b/arch/arm64/include/asm/xcall.h
@@ -26,7 +26,6 @@ struct xcall_comm {
         char *binary;
         struct path binary_path;
         char *module;
-        struct list_head list;
 };
 
 struct xcall {
@@ -37,6 +36,7 @@ struct xcall {
         struct inode *binary;
         struct xcall_prog *program;
         char *name;
+        struct xcall_comm *info;
 };
 
 struct xcall_area {
@@ -52,6 +52,8 @@ struct xcall_area {
 extern const syscall_fn_t *default_sys_call_table(void);
 
 #ifdef CONFIG_DYNAMIC_XCALL
+extern void free_xcall_comm(struct xcall_comm *info);
+extern void xcall_info_show(struct seq_file *m);
 extern int xcall_attach(struct xcall_comm *info);
 extern int xcall_detach(struct xcall_comm *info);
 extern int xcall_pre_sstep_check(struct pt_regs *regs);
diff --git a/arch/arm64/kernel/xcall/core.c b/arch/arm64/kernel/xcall/core.c
index a88c4ed6e575..18b73c044a8d 100644
--- a/arch/arm64/kernel/xcall/core.c
+++ b/arch/arm64/kernel/xcall/core.c
@@ -117,6 +117,8 @@ static void put_xcall(struct xcall *xcall)
                 return;
 
         kfree(xcall->name);
+        free_xcall_comm(xcall->info);
+
         if (xcall->program)
                 module_put(xcall->program->owner);
 
@@ -135,16 +137,18 @@ static struct xcall *find_xcall(const char *name, struct inode *binary)
         return NULL;
 }
 
-static struct xcall *insert_xcall_locked(struct xcall *xcall)
+static struct xcall *insert_xcall_locked(struct xcall *xcall, struct xcall_comm *info)
 {
         struct xcall *ret = NULL;
 
         spin_lock(&xcall_list_lock);
         ret = find_xcall(xcall->name, xcall->binary);
-        if (!ret)
+        if (!ret) {
+                xcall->info = info;
                 list_add(&xcall->list, &xcalls_list);
-        else
+        } else
                 put_xcall(ret);
+
         spin_unlock(&xcall_list_lock);
         return ret;
 }
@@ -284,6 +288,19 @@ void clear_xcall_area(struct mm_struct *mm)
         mm->xcall = NULL;
 }
 
+void xcall_info_show(struct seq_file *m)
+{
+        struct xcall *xcall;
+
+        spin_lock(&xcall_list_lock);
+        list_for_each_entry(xcall, &xcalls_list, list) {
+                seq_printf(m, "+:%s %s %s\n",
+                           xcall->info->name, xcall->info->binary,
+                           xcall->info->module);
+        }
+        spin_unlock(&xcall_list_lock);
+}
+
 int xcall_attach(struct xcall_comm *comm)
 {
         struct xcall *xcall;
@@ -305,7 +322,7 @@ int xcall_attach(struct xcall_comm *comm)
                 return -ENOMEM;
         }
 
-        if (insert_xcall_locked(xcall)) {
+        if (insert_xcall_locked(xcall, comm)) {
                 delete_xcall(xcall);
                 return -EINVAL;
         }
diff --git a/arch/arm64/kernel/xcall/proc.c b/arch/arm64/kernel/xcall/proc.c
index beb2c66dfc47..9dea182340ef 100644
--- a/arch/arm64/kernel/xcall/proc.c
+++ b/arch/arm64/kernel/xcall/proc.c
@@ -13,12 +13,9 @@
 
 #include <asm/xcall.h>
 
-static LIST_HEAD(comm_list);
-static DECLARE_RWSEM(comm_rwsem);
-
 static struct proc_dir_entry *root_xcall_dir;
 
-static void free_xcall_comm(struct xcall_comm *info)
+void free_xcall_comm(struct xcall_comm *info)
 {
         if (!info)
                 return;
@@ -29,38 +26,6 @@ static void free_xcall_comm(struct xcall_comm *info)
         kfree(info);
 }
 
-static struct xcall_comm *find_xcall_comm(struct xcall_comm *comm)
-{
-        struct xcall_comm *temp;
-
-        list_for_each_entry(temp, &comm_list, list) {
-                if (!strcmp(comm->name, temp->name))
-                        return temp;
-        }
-
-        return NULL;
-}
-
-static void delete_xcall_comm_locked(struct xcall_comm *info)
-{
-        struct xcall_comm *ret;
-
-        down_write(&comm_rwsem);
-        ret = find_xcall_comm(info);
-        if (ret)
-                list_del(&ret->list);
-        up_write(&comm_rwsem);
-        free_xcall_comm(ret);
-}
-
-static void insert_xcall_comm_locked(struct xcall_comm *info)
-{
-        down_write(&comm_rwsem);
-        if (!find_xcall_comm(info))
-                list_add(&info->list, &comm_list);
-        up_write(&comm_rwsem);
-}
-
 static int is_absolute_path(const char *path)
 {
         return path[0] == '/';
@@ -141,21 +106,16 @@ int proc_xcall_command(int argc, char **argv)
         info = kzalloc(sizeof(*info), GFP_KERNEL);
         if (!info)
                 return -ENOMEM;
-        INIT_LIST_HEAD(&info->list);
 
         op = parse_xcall_command(argc, argv, info);
         switch (op) {
         case '+':
                 ret = xcall_attach(info);
-                if (!ret)
-                        insert_xcall_comm_locked(info);
-                else
+                if (ret)
                         free_xcall_comm(info);
                 break;
         case '-':
                 ret = xcall_detach(info);
-                if (!ret)
-                        delete_xcall_comm_locked(info);
                 free_xcall_comm(info);
                 break;
         default:
@@ -168,15 +128,8 @@ int proc_xcall_command(int argc, char **argv)
 
 static int xcall_comm_show(struct seq_file *m, void *v)
 {
-        struct xcall_comm *info;
+        xcall_info_show(m);
 
-        down_read(&comm_rwsem);
-        list_for_each_entry(info, &comm_list, list) {
-                seq_printf(m, "+:%s %s %s\n",
-                           info->name, info->binary,
-                           info->module);
-        }
-        up_read(&comm_rwsem);
         return 0;
 }
-- 
2.34.1
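[Editor's note] For context, a rough userspace reproducer of the kind of
multithreaded test described above: two threads hammer /proc/xcall/comm with
attach and detach commands. The exact command syntax accepted by
/proc/xcall/comm is not shown in this series, so the CMD_ATTACH / CMD_DETACH
strings are placeholders that would need to be replaced with real commands
for the system under test:

#include <fcntl.h>
#include <pthread.h>
#include <string.h>
#include <unistd.h>

#define CMD_ATTACH "+ ..."   /* placeholder attach command */
#define CMD_DETACH "- ..."   /* placeholder detach command */

static void *hammer(void *arg)
{
        const char *cmd = arg;
        int i, fd;

        for (i = 0; i < 100000; i++) {
                fd = open("/proc/xcall/comm", O_WRONLY);
                if (fd < 0)
                        break;
                (void)write(fd, cmd, strlen(cmd));
                close(fd);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        /* Before the fix, interleaved attach/detach could leave a stale
         * xcall_comm on comm_list, as in the kmemleak report above. */
        pthread_create(&a, NULL, hammer, (void *)CMD_ATTACH);
        pthread_create(&b, NULL, hammer, (void *)CMD_DETACH);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}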
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/release-management/issues/ID5CMS

--------------------------------

Introduce struct prefetch_mm_data to store per-process data. It has
three important members:

- prefetch_mm_data.items: an array of prefetch items, indexed by file
  descriptor value.
- prefetch_mm_data.events: a buffer used in epoll_pwait() to copy the
  ready fd list from user space.
- prefetch_mm_data.mmu_notifier: in testing, when redis is killed or
  exits abnormally, there is no chance to free the buffer pages in
  prefetch_mm_data.items, so they leak. Registering this mmu_notifier
  callback gives each process a window to free its resources when the
  mm is torn down.

Fixes: ccb8cd0c6313 ("xcall2.0: Introduce xcall epollwait prefetch feature")
Signed-off-by: Xinyu Zheng <zhengxinyu6@huawei.com>
---
 drivers/staging/xcall/prefetch.c | 142 +++++++++++++++++++++----------
 1 file changed, 99 insertions(+), 43 deletions(-)

diff --git a/drivers/staging/xcall/prefetch.c b/drivers/staging/xcall/prefetch.c
index 7e033a770f14..c71d35e77945 100644
--- a/drivers/staging/xcall/prefetch.c
+++ b/drivers/staging/xcall/prefetch.c
@@ -17,13 +17,13 @@
 
 #include <asm/xcall.h>
 
-#define MAX_FD 100
+#define MAX_FD 1024
 
 #define XCALL_CACHE_PAGE_ORDER 2
 #define XCALL_CACHE_BUF_SIZE ((1 << XCALL_CACHE_PAGE_ORDER) * PAGE_SIZE)
 
-#define current_prefetch_items() \
-        ((struct prefetch_item *) \
+#define current_prefetch_mm_data() \
+        ((struct prefetch_mm_data *) \
         ((((struct xcall_area *)(current->mm->xcall))->sys_call_data)[__NR_epoll_pwait]))
 
 static DEFINE_PER_CPU_ALIGNED(unsigned long, xcall_cache_hit);
@@ -33,6 +33,9 @@ static struct workqueue_struct *rc_work;
 static struct cpumask xcall_mask;
 struct proc_dir_entry *xcall_proc_dir, *prefetch_dir, *xcall_mask_dir;
 
+static struct list_head prefetch_mm_data_to_delete;
+static spinlock_t prefetch_mm_delete_lock;
+
 enum cache_state {
         XCALL_CACHE_NONE = 0,
         XCALL_CACHE_PREFETCH,
@@ -53,6 +56,13 @@ struct prefetch_item {
         loff_t pos;
 };
 
+struct prefetch_mm_data {
+        struct prefetch_item items[MAX_FD];
+        struct epoll_event events[MAX_FD];
+        struct mmu_notifier mmu_notifier;
+        struct list_head list;
+};
+
 static ssize_t xcall_mask_proc_write(struct file *file, const char __user *buf,
                                      size_t count, loff_t *ppos)
 {
@@ -140,6 +150,17 @@ static const struct proc_ops xcall_prefetch_fops = {
         .proc_release = single_release
 };
 
+static inline struct prefetch_item *get_pfi(unsigned int fd)
+{
+        struct prefetch_item *pfis = NULL;
+
+        if (fd >= MAX_FD || !current_prefetch_mm_data())
+                return NULL;
+
+        pfis = (struct prefetch_item *)current_prefetch_mm_data();
+        return pfis + fd;
+}
+
 static inline bool transition_state(struct prefetch_item *pfi,
                                     enum cache_state old, enum cache_state new)
 {
@@ -200,28 +221,57 @@ static int get_async_prefetch_cpu(struct prefetch_item *pfi)
         return pfi->cpu;
 }
 
-static void xcall_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+static void prefetch_pfi_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
+        struct prefetch_mm_data *private_data =
+                container_of(mn, struct prefetch_mm_data, mmu_notifier);
         struct xcall_area *area = mm_xcall_area(mm);
-        void *area_private_data = NULL;
+        struct prefetch_item *prefetch_items = NULL;
+        int i;
 
-        area_private_data = xchg(&area->sys_call_data[__NR_epoll_pwait], NULL);
-        kfree(area_private_data);
+        private_data = xchg(&area->sys_call_data[__NR_epoll_pwait], NULL);
+        prefetch_items = (struct prefetch_item *)private_data->items;
+        for (i = 0; i < MAX_FD; i++) {
+                cancel_work_sync(&prefetch_items[i].work);
+                if (prefetch_items[i].file)
+                        fput(prefetch_items[i].file);
+                if (prefetch_items[i].cache_pages)
+                        __free_pages(prefetch_items[i].cache_pages, XCALL_CACHE_PAGE_ORDER);
+                prefetch_items[i].cache = NULL;
+        }
+        spin_lock(&prefetch_mm_delete_lock);
+        list_add_tail(&private_data->list, &prefetch_mm_data_to_delete);
+        spin_unlock(&prefetch_mm_delete_lock);
 }
 
-static struct mmu_notifier_ops xcall_mmu_notifier_ops = {
-        .release = xcall_mm_release,
-};
+static void xcall_mmu_notifier_free(struct mmu_notifier *mn)
+{
+        kfree(container_of(mn, struct prefetch_mm_data, mmu_notifier));
+}
+
+static void xcall_prefetch_mm_free(void)
+{
+        struct prefetch_mm_data *private_data, *tmp;
 
-static struct mmu_notifier xcall_mmu_notifier = {
-        .ops = &xcall_mmu_notifier_ops,
+        spin_lock(&prefetch_mm_delete_lock);
+        list_for_each_entry_safe(private_data, tmp, &prefetch_mm_data_to_delete, list) {
+                list_del(&private_data->list);
+                mmu_notifier_put(&private_data->mmu_notifier);
+        }
+        spin_unlock(&prefetch_mm_delete_lock);
+}
+
+static struct mmu_notifier_ops xcall_mmu_notifier_ops = {
+        .release = prefetch_pfi_release,
+        .free_notifier = xcall_mmu_notifier_free,
 };
 
 static void xcall_cancel_work(unsigned int fd)
 {
-        struct prefetch_item *pfi = current_prefetch_items() + fd;
+        struct prefetch_item *pfi = NULL;
 
-        if (fd < MAX_FD && pfi->file)
+        pfi = get_pfi(fd);
+        if (pfi && pfi->file)
                 cancel_work_sync(&pfi->work);
 }
 
@@ -291,11 +341,8 @@ static inline int xcall_read_begin(unsigned int fd, char __user *buf, size_t cou
 {
         struct prefetch_item *pfi = NULL;
 
-        if (fd >= MAX_FD || !current_prefetch_items())
-                return -EAGAIN;
-
-        pfi = current_prefetch_items() + fd;
-        if (!pfi->file)
+        pfi = get_pfi(fd);
+        if (!pfi || !pfi->file)
                 return -EAGAIN;
 
         return xcall_read(pfi, buf, count);
@@ -305,11 +352,8 @@ static inline void xcall_read_end(unsigned int fd)
 {
         struct prefetch_item *pfi = NULL;
 
-        if (fd >= MAX_FD || !current_prefetch_items())
-                return;
-
-        pfi = current_prefetch_items() + fd;
-        if (!pfi->file)
+        pfi = get_pfi(fd);
+        if (!pfi || !pfi->file)
                 return;
 
         transition_state(pfi, XCALL_CACHE_CANCEL, XCALL_CACHE_NONE);
@@ -320,24 +364,24 @@ static long __do_sys_epoll_create(struct pt_regs *regs)
         long ret;
         int i;
         struct xcall_area *area = mm_xcall_area(current->mm);
+        struct prefetch_mm_data *private_data = NULL;
         struct prefetch_item *items = NULL;
 
         ret = default_sys_call_table()[__NR_epoll_create1](regs);
         if (ret < 0)
                 return ret;
-
-        if (current_prefetch_items())
+        if (current_prefetch_mm_data())
                 return ret;
 
-        items = kcalloc(MAX_FD, sizeof(struct prefetch_item), GFP_KERNEL);
-        if (!items)
-                return -ENOMEM;
-
-        if (cmpxchg(&area->sys_call_data[__NR_epoll_pwait], NULL, items)) {
-                kfree(items);
+        private_data = kmalloc(sizeof(struct prefetch_mm_data), GFP_KERNEL);
+        if (!private_data)
+                return ret;
+        if (cmpxchg(&area->sys_call_data[__NR_epoll_pwait], NULL, private_data)) {
+                kfree(private_data);
                 return ret;
         }
 
+        items = private_data->items;
         for (i = 0; i < MAX_FD; i++) {
                 items[i].cache_pages = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
                                                    XCALL_CACHE_PAGE_ORDER);
@@ -354,7 +398,12 @@ static long __do_sys_epoll_create(struct pt_regs *regs)
                 items[i].file = NULL;
                 set_prefetch_numa_cpu(&items[i]);
         }
-        mmu_notifier_register(&xcall_mmu_notifier, current->mm);
+
+        memset(private_data->events, 0, sizeof(private_data->events));
+        INIT_LIST_HEAD(&private_data->list);
+        private_data->mmu_notifier.ops = &xcall_mmu_notifier_ops;
+        mmu_notifier_register(&private_data->mmu_notifier, current->mm);
+        xcall_prefetch_mm_free();
 
         return ret;
 }
@@ -367,19 +416,18 @@ static long __do_sys_epoll_ctl(struct pt_regs *regs)
         long ret;
 
         ret = default_sys_call_table()[__NR_epoll_ctl](regs);
-        if (ret || fd >= MAX_FD)
+        if (ret)
                 return ret;
 
-        if (!current_prefetch_items())
+        pfi = get_pfi(fd);
+        if (!pfi)
                 return ret;
 
-        pfi = current_prefetch_items() + fd;
         switch (op) {
         case EPOLL_CTL_ADD:
                 file = fget(fd);
                 if (!file)
                         return ret;
-
                 if (!sock_from_file(file)) {
                         fput(file);
                         return ret;
@@ -399,7 +447,8 @@ static long __do_sys_epoll_pwait(struct pt_regs *regs)
 {
         void __user *buf = (void *)regs->regs[1];
         struct prefetch_item *pfi = NULL;
-        struct epoll_event events[MAX_FD] = {0};
+        struct prefetch_mm_data *private_data = NULL;
+        struct epoll_event *events = NULL;
         int i, fd, cpu, prefetch_task_num;
         long ret;
 
@@ -407,9 +456,12 @@ static long __do_sys_epoll_pwait(struct pt_regs *regs)
         if (ret <= 0)
                 return ret;
 
-        if (!current_prefetch_items())
+        if (!current_prefetch_mm_data())
                 return ret;
 
+        private_data = current_prefetch_mm_data();
+        events = private_data->events;
+
         prefetch_task_num = ret > MAX_FD ? MAX_FD : ret;
         if (copy_from_user(events, buf, prefetch_task_num * sizeof(struct epoll_event)))
                 return ret;
@@ -419,7 +471,7 @@ static long __do_sys_epoll_pwait(struct pt_regs *regs)
                 if (!(events[i].events & EPOLLIN) || fd >= MAX_FD)
                         continue;
 
-                pfi = current_prefetch_items() + fd;
+                pfi = get_pfi(fd);
                 if (!(pfi->file) || !(pfi->file->f_mode & FMODE_READ))
                         continue;
                 if (atomic_read(&pfi->state) != XCALL_CACHE_NONE)
@@ -438,11 +490,11 @@ static long __do_sys_close(struct pt_regs *regs)
         struct file *pfi_old_file = NULL;
         struct file *pfi_new_file = NULL;
 
-        if (!current_prefetch_items())
+        pfi = get_pfi(fd);
+        if (!pfi)
                 return default_sys_call_table()[__NR_close](regs);
 
-        pfi = current_prefetch_items() + fd;
-        if (fd < MAX_FD && pfi->file) {
+        if (pfi && pfi->file) {
                 pfi_old_file = pfi->file;
                 pfi_new_file = cmpxchg(&pfi->file, pfi_old_file, NULL);
                 if (pfi_new_file == pfi_old_file) {
@@ -544,6 +596,8 @@ static int __init xcall_prefetch_init(void)
         if (ret)
                 goto remove_dir;
 
+        INIT_LIST_HEAD(&prefetch_mm_data_to_delete);
+        spin_lock_init(&prefetch_mm_delete_lock);
         return ret;
 
 remove_dir:
@@ -568,6 +622,8 @@ static void __exit xcall_prefetch_exit(void)
         proc_remove(xcall_proc_dir);
 
         xcall_prog_unregister(&xcall_prefetch_prog);
+        xcall_prefetch_mm_free();
+        mmu_notifier_synchronize();
 }
 
 module_init(xcall_prefetch_init);
module_exit(xcall_prefetch_exit);
-- 
2.34.1
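[Editor's note] As background, a condensed sketch of the mmu_notifier lifetime
pattern this patch adopts: ->release tears down per-mm resources when the
address space goes away, and ->free_notifier frees the container after
mmu_notifier_put(). The per_mm_* names below are illustrative stand-ins for
prefetch_mm_data / prefetch_pfi_release / xcall_mmu_notifier_free; the patch
additionally defers mmu_notifier_put() to a to-delete list that is drained
later (from epoll_create and module exit) rather than dropping the reference
inline.

#include <linux/mmu_notifier.h>
#include <linux/slab.h>

struct per_mm_data {
        struct mmu_notifier mn;
        /* per-process buffers, prefetch items, ... */
};

/* Called when the mm is torn down (exit/exec/OOM kill): last chance to
 * release buffers that would otherwise leak, as seen with redis above. */
static void per_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        /* free or cancel per-mm resources here */
}

/* Called after the RCU grace period started by mmu_notifier_put(). */
static void per_mm_free(struct mmu_notifier *mn)
{
        kfree(container_of(mn, struct per_mm_data, mn));
}

static const struct mmu_notifier_ops per_mm_ops = {
        .release       = per_mm_release,
        .free_notifier = per_mm_free,
};

/* The owner later drops its reference with mmu_notifier_put(&d->mn). */
static int per_mm_attach(struct mm_struct *mm)
{
        struct per_mm_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
        int ret;

        if (!d)
                return -ENOMEM;
        d->mn.ops = &per_mm_ops;
        ret = mmu_notifier_register(&d->mn, mm);
        if (ret)
                kfree(d);       /* not yet published, safe to free directly */
        return ret;
}

Module teardown then pairs the final mmu_notifier_put() calls with
mmu_notifier_synchronize(), as the patch does in xcall_prefetch_exit().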
FeedBack: The patch(es) you have sent to the kernel@openeuler.org mailing list
have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/19326
Mailing list address: https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/2ZQ...
participants (2):
 - patchwork bot
 - Xinyu Zheng