
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IC9Q31

--------------------------------

Refactor the xcall_enable logic to ensure only one reserved kabi in
task_struct is used by xcall, which facilitates future xcall expansion.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/arm64/include/asm/xcall.h  | 18 ++++++++++++++++++
 arch/arm64/kernel/Makefile      |  1 +
 arch/arm64/kernel/asm-offsets.c |  2 +-
 arch/arm64/kernel/cpufeature.c  | 13 +++++--------
 arch/arm64/kernel/entry.S       |  2 +-
 arch/arm64/kernel/xcall.c       | 34 ++++++++++++++++++++++++++++++++++
 fs/proc/proc_xcall.c            | 29 +++++++++++++++++------------
 include/linux/sched.h           |  3 ++-
 kernel/fork.c                   | 15 +++++++--------
 9 files changed, 86 insertions(+), 31 deletions(-)
 create mode 100644 arch/arm64/include/asm/xcall.h
 create mode 100644 arch/arm64/kernel/xcall.c

diff --git a/arch/arm64/include/asm/xcall.h b/arch/arm64/include/asm/xcall.h
new file mode 100644
index 000000000000..a846f6bed5da
--- /dev/null
+++ b/arch/arm64/include/asm/xcall.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_XCALL_H
+#define __ASM_XCALL_H
+
+#include <linux/jump_label.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+DECLARE_STATIC_KEY_FALSE(xcall_enable);
+
+struct xcall_info {
+	/* Must be first! */
+	DECLARE_BITMAP(xcall_enable, __NR_syscalls);
+};
+
+int xcall_init_task(struct task_struct *p, struct task_struct *orig);
+void xcall_task_free(struct task_struct *p);
+#endif /*__ASM_XCALL_H*/
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5b9951b6fb05..9db801f8fd22 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_CVM_GUEST)		+= cvm_guest.o cvm_tsi.o
 obj-y					+= vdso/ probes/
 obj-$(CONFIG_COMPAT_VDSO)		+= vdso32/
 obj-$(CONFIG_ARM64_ILP32)		+= vdso-ilp32/
+obj-$(CONFIG_FAST_SYSCALL)		+= xcall.o
 head-y					:= head.o
 extra-y					+= $(head-y) vmlinux.lds
 
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 7c6ad4b1667b..54a21afd4181 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -27,7 +27,7 @@ int main(void)
 {
 #ifdef CONFIG_FAST_SYSCALL
-  DEFINE(TSK_XCALL,		offsetof(struct task_struct, xcall_enable));
+  DEFINE(TSK_XCALL,		offsetof(struct task_struct, xinfo));
 #endif
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
   BLANK();
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 85289b1db7bc..9cb9a209b63a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2156,22 +2156,19 @@ static bool can_clearpage_use_stnp(const struct arm64_cpu_capabilities *entry,
 }
 
 #ifdef CONFIG_FAST_SYSCALL
-static bool is_xcall_support;
+#include <asm/xcall.h>
+DEFINE_STATIC_KEY_FALSE(xcall_enable);
+
 static int __init xcall_setup(char *str)
 {
-	is_xcall_support = true;
+	static_branch_enable(&xcall_enable);
 	return 1;
 }
 __setup("xcall", xcall_setup);
 
-bool fast_syscall_enabled(void)
-{
-	return is_xcall_support;
-}
-
 static bool has_xcall_support(const struct arm64_cpu_capabilities *entry, int __unused)
 {
-	return is_xcall_support;
+	return static_branch_unlikely(&xcall_enable);
 }
 #endif
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 4b340be57d66..976f629c60fe 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -707,7 +707,7 @@ SYM_CODE_END(el1_irq)
 	.endm
 
 	.macro check_xcall_enable
-	/* x21 = task_struct->xcall_enable */
+	/* x21 = task_struct->xinfo->xcall_enable */
 	ldr_this_cpu x20, __entry_task, x21
 	ldr	x21, [x20, #TSK_XCALL]
 	/* x20 = sc_no / 8 */
diff --git a/arch/arm64/kernel/xcall.c b/arch/arm64/kernel/xcall.c
new file mode 100644
index 000000000000..718094de7b0d
--- /dev/null
+++ b/arch/arm64/kernel/xcall.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * xcall related code
+ *
+ * Copyright (C) 2025 Huawei Ltd.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <asm/xcall.h>
+
+int xcall_init_task(struct task_struct *p, struct task_struct *orig)
+{
+	if (!static_branch_unlikely(&xcall_enable))
+		return 0;
+
+	p->xinfo = kzalloc(sizeof(struct xcall_info), GFP_KERNEL);
+	if (!p->xinfo)
+		return -ENOMEM;
+
+	if (orig->xinfo)
+		bitmap_copy(p->xinfo->xcall_enable, orig->xinfo->xcall_enable,
+			    __NR_syscalls);
+
+	return 0;
+}
+
+void xcall_task_free(struct task_struct *p)
+{
+	if (!static_branch_unlikely(&xcall_enable))
+		return;
+
+	kfree(p->xinfo);
+}
diff --git a/fs/proc/proc_xcall.c b/fs/proc/proc_xcall.c
index e90f28123560..baa40e456ad9 100644
--- a/fs/proc/proc_xcall.c
+++ b/fs/proc/proc_xcall.c
@@ -7,6 +7,7 @@
 #include <linux/cpufeature.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
+#include <asm/xcall.h>
 #include "internal.h"
 
 static int xcall_show(struct seq_file *m, void *v)
@@ -14,6 +15,7 @@ static int xcall_show(struct seq_file *m, void *v)
 	struct inode *inode = m->private;
 	struct task_struct *p;
 	unsigned int rs, re;
+	struct xcall_info *xinfo;
 
 	if (!system_supports_xcall())
 		return -EACCES;
@@ -22,12 +24,13 @@ static int xcall_show(struct seq_file *m, void *v)
 	if (!p)
 		return -ESRCH;
 
-	if (!p->xcall_enable)
+	xinfo = p->xinfo;
+	if (!xinfo)
 		goto out;
 
-	for (rs = 0, bitmap_next_set_region(p->xcall_enable, &rs, &re, __NR_syscalls);
+	for (rs = 0, bitmap_next_set_region(xinfo->xcall_enable, &rs, &re, __NR_syscalls);
 	     rs < re; rs = re + 1,
-	     bitmap_next_set_region(p->xcall_enable, &rs, &re, __NR_syscalls)) {
+	     bitmap_next_set_region(xinfo->xcall_enable, &rs, &re, __NR_syscalls)) {
 		if (rs == (re - 1))
 			seq_printf(m, "%d,", rs);
 		else
@@ -45,15 +48,15 @@ static int xcall_open(struct inode *inode, struct file *filp)
 	return single_open(filp, xcall_show, inode);
 }
 
-static int xcall_enable_one(struct task_struct *p, unsigned int sc_no)
+static int xcall_enable_one(struct xcall_info *xinfo, unsigned int sc_no)
 {
-	bitmap_set(p->xcall_enable, sc_no, 1);
+	test_and_set_bit(sc_no, xinfo->xcall_enable);
 	return 0;
 }
 
-static int xcall_disable_one(struct task_struct *p, unsigned int sc_no)
+static int xcall_disable_one(struct xcall_info *xinfo, unsigned int sc_no)
 {
-	bitmap_clear(p->xcall_enable, sc_no, 1);
+	test_and_clear_bit(sc_no, xinfo->xcall_enable);
 	return 0;
 }
 
@@ -67,6 +70,7 @@ static ssize_t xcall_write(struct file *file, const char __user *buf,
 	unsigned int sc_no = __NR_syscalls;
 	int ret = 0;
 	int is_clear = 0;
+	struct xcall_info *xinfo;
 
 	if (!system_supports_xcall())
 		return -EACCES;
@@ -76,9 +80,10 @@ static ssize_t xcall_write(struct file *file, const char __user *buf,
 		return -EFAULT;
 
 	p = get_proc_task(inode);
-	if (!p || !p->xcall_enable)
+	if (!p || !p->xinfo)
 		return -ESRCH;
 
+	xinfo = p->xinfo;
 	if (buffer[0] == '!')
 		is_clear = 1;
 
@@ -92,10 +97,10 @@ static ssize_t xcall_write(struct file *file, const char __user *buf,
 		goto out;
 	}
 
-	if (!is_clear && !test_bit(sc_no, p->xcall_enable))
-		ret = xcall_enable_one(p, sc_no);
-	else if (is_clear && test_bit(sc_no, p->xcall_enable))
-		ret = xcall_disable_one(p, sc_no);
+	if (!is_clear && !test_bit(sc_no, xinfo->xcall_enable))
+		ret = xcall_enable_one(xinfo, sc_no);
+	else if (is_clear && test_bit(sc_no, xinfo->xcall_enable))
+		ret = xcall_disable_one(xinfo, sc_no);
 	else
 		ret = -EINVAL;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 18361e35a377..4f5739e6bea5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -67,6 +67,7 @@ struct signal_struct;
 struct task_delay_info;
 struct task_group;
 struct io_uring_task;
+struct xcall_info;
 
 /*
  * Task state bitmask. NOTE! These bits are also
@@ -1478,7 +1479,7 @@ struct task_struct {
 	KABI_RESERVE(14)
 #endif
 #if defined(CONFIG_FAST_SYSCALL)
-	KABI_USE(15, unsigned long *xcall_enable)
+	KABI_USE(15, struct xcall_info *xinfo)
 #else
 	KABI_RESERVE(15)
 #endif
diff --git a/kernel/fork.c b/kernel/fork.c
index bd7afeb364ab..b4af7886d6e3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -107,6 +107,9 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#ifdef CONFIG_FAST_SYSCALL
+#include <asm/xcall.h>
+#endif
 
 #include <trace/events/sched.h>
 
@@ -481,8 +484,7 @@ void free_task(struct task_struct *tsk)
 	sched_relationship_free(tsk);
 
 #ifdef CONFIG_FAST_SYSCALL
-	if (tsk->xcall_enable)
-		bitmap_free(tsk->xcall_enable);
+	xcall_task_free(tsk);
 #endif
 
 	free_task_struct(tsk);
@@ -1015,7 +1017,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #endif
 
 #ifdef CONFIG_FAST_SYSCALL
-	tsk->xcall_enable = NULL;
+	tsk->xinfo = NULL;
 #endif
 
 	return tsk;
@@ -2097,12 +2099,9 @@ static __latent_entropy struct task_struct *copy_process(
 	rt_mutex_init_task(p);
 
 #ifdef CONFIG_FAST_SYSCALL
-	p->xcall_enable = bitmap_zalloc(__NR_syscalls, GFP_KERNEL);
-	if (!p->xcall_enable)
+	retval = xcall_init_task(p, current);
+	if (retval)
 		goto bad_fork_free;
-
-	if (current->xcall_enable)
-		bitmap_copy(p->xcall_enable, current->xcall_enable, __NR_syscalls);
 #endif
 
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
-- 
2.34.1
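
For reviewers, a minimal userspace sketch of how the per-task interface kept in fs/proc/proc_xcall.c is exercised after this refactor. It only illustrates the write format handled by xcall_write() above (a decimal syscall number enables xcall for that syscall, a leading '!' clears it); the /proc/<pid>/xcall path, the helper name xcall_set() and the example syscall number are assumptions for illustration, since the proc entry registration is outside this diff.

/*
 * Illustrative only, not part of the patch: drives the proc interface
 * implemented by xcall_write()/xcall_show(), assuming the entry is
 * exposed as /proc/<pid>/xcall.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int xcall_set(pid_t pid, unsigned int sc_no, int enable)
{
	char path[64], buf[16];
	int fd, len, ret;

	snprintf(path, sizeof(path), "/proc/%d/xcall", (int)pid);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;

	/* A '!' prefix asks xcall_write() to clear the bit instead of setting it. */
	len = snprintf(buf, sizeof(buf), "%s%u", enable ? "" : "!", sc_no);
	ret = (write(fd, buf, len) == len) ? 0 : -1;
	close(fd);
	return ret;
}

int main(void)
{
	/* Enable xcall for syscall 64 (__NR_write on arm64) in this task. */
	if (xcall_set(getpid(), 64, 1))
		perror("xcall_set");
	return 0;
}

Judging from the last proc_xcall.c hunk, asking for a state the bitmap already has is rejected with -EINVAL, so the helper above treats a short or failed write() as an error as well.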