hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/release-management/issues/ID5CMS -------------------------------- Because the default HCR_EL2.TACR value is 1, enable ACTLR_XCALL system-wide by default to avoid the overhead of the vCPU trapping out caused by accesses to ACTLR_XCALL during scheduling context switches. Also, separate the userspace control interface of xcall into two parts. The first one aims to register xcall for an individual TASK via /proc/[pid]/xcall. The second one aims to register xcall for an individual BINARY file via /proc/xcall/comm. Some code cleanup is therefore needed to implement the first one. Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com> --- arch/arm64/include/asm/mmu_context.h | 7 --- arch/arm64/include/asm/xcall.h | 76 ---------------------------- arch/arm64/kernel/cpufeature.c | 71 +++++++++++++------------- arch/arm64/kernel/xcall/entry.S | 7 --- arch/arm64/kernel/xcall/xcall.c | 40 +-------------- arch/arm64/kvm/sys_regs.c | 1 + fs/proc/proc_xcall.c | 64 +---------------------- 7 files changed, 39 insertions(+), 227 deletions(-) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 39595fa03491..a6fb325424e7 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -24,9 +24,6 @@ #include <asm/cputype.h> #include <asm/sysreg.h> #include <asm/tlbflush.h> -#ifdef CONFIG_ACTLR_XCALL_XINT -#include <asm/xcall.h> -#endif extern bool rodata_full; @@ -267,10 +264,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, if (prev != next) __switch_mm(next); -#ifdef CONFIG_ACTLR_XCALL_XINT - cpu_switch_xcall_entry(tsk); -#endif - /* * Update the saved TTBR0_EL1 of the scheduled-in task as the previous * value may have not been initialised yet (activate_mm caller) or the diff --git a/arch/arm64/include/asm/xcall.h b/arch/arm64/include/asm/xcall.h index ee526a520704..5cea7f81a857 100644 --- a/arch/arm64/include/asm/xcall.h +++ 
b/arch/arm64/include/asm/xcall.h @@ -2,17 +2,12 @@ #ifndef __ASM_XCALL_H #define __ASM_XCALL_H -#include <linux/atomic.h> #include <linux/jump_label.h> #include <linux/mm_types.h> -#include <linux/percpu.h> #include <linux/sched.h> -#include <linux/types.h> #include <linux/xcall.h> #include <linux/refcount.h> -#include <asm/actlr.h> -#include <asm/cpufeature.h> #include <asm/syscall.h> #define SVC_0000 0xd4000001 @@ -101,75 +96,4 @@ struct xcall_info { int xcall_init_task(struct task_struct *p, struct task_struct *orig); void xcall_task_free(struct task_struct *p); - -#ifdef CONFIG_ACTLR_XCALL_XINT -struct hw_xcall_info { - /* Must be first! */ - void *xcall_entry[__NR_syscalls + 1]; - atomic_t xcall_scno_count; - /* keep xcall_entry and xcall scno count consistent */ - spinlock_t lock; -}; - -#define TASK_HW_XINFO(p) ((struct hw_xcall_info *)p->xinfo) -#define XCALL_ENTRY_SIZE (sizeof(unsigned long) * (__NR_syscalls + 1)) - -DECLARE_PER_CPU(void *, __cpu_xcall_entry); -extern void xcall_entry(void); -extern void no_xcall_entry(void); - -static inline bool is_xcall_entry(struct hw_xcall_info *xinfo, unsigned int sc_no) -{ - return xinfo->xcall_entry[sc_no] == xcall_entry; -} - -static inline int set_hw_xcall_entry(struct hw_xcall_info *xinfo, - unsigned int sc_no, bool enable) -{ - spin_lock(&xinfo->lock); - if (enable && !is_xcall_entry(xinfo, sc_no)) { - xinfo->xcall_entry[sc_no] = xcall_entry; - atomic_inc(&xinfo->xcall_scno_count); - } - - if (!enable && is_xcall_entry(xinfo, sc_no)) { - xinfo->xcall_entry[sc_no] = no_xcall_entry; - atomic_dec(&xinfo->xcall_scno_count); - } - spin_unlock(&xinfo->lock); - - return 0; -} - -static inline void cpu_set_arch_xcall(bool enable) -{ - u64 el = read_sysreg(CurrentEL); - u64 val; - - if (el == CurrentEL_EL2) { - val = read_sysreg(actlr_el2); - val = enable ? (val | ACTLR_ELx_XCALL) : (val & ~ACTLR_ELx_XCALL); - write_sysreg(val, actlr_el2); - } else { - val = read_sysreg(actlr_el1); - val = enable ? 
(val | ACTLR_ELx_XCALL) : (val & ~ACTLR_ELx_XCALL); - write_sysreg(val, actlr_el1); - } -} - -static inline void cpu_switch_xcall_entry(struct task_struct *tsk) -{ - struct hw_xcall_info *xinfo = tsk->xinfo; - - if (!system_uses_xcall_xint() || !tsk->xinfo) - return; - - if (unlikely(atomic_read(&xinfo->xcall_scno_count) > 0)) { - __this_cpu_write(__cpu_xcall_entry, xinfo->xcall_entry); - cpu_set_arch_xcall(true); - } else - cpu_set_arch_xcall(false); -} -#endif /* CONFIG_ACTLR_XCALL_XINT */ - #endif /* __ASM_XCALL_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 480af6df8364..625a77760126 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2443,6 +2443,39 @@ static void mpam_extra_caps(void) #include <asm/xcall.h> DEFINE_STATIC_KEY_FALSE(xcall_enable); +static int __init xcall_setup(char *str) +{ + static_branch_enable(&xcall_enable); + + return 1; +} +__setup("xcall", xcall_setup); + +static bool has_xcall_support(const struct arm64_cpu_capabilities *entry, int __unused) +{ + return static_key_enabled(&xcall_enable); +} +#endif + +#ifdef CONFIG_FAST_IRQ +bool is_xint_support; +static int __init xint_setup(char *str) +{ + if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS)) + return 1; + + is_xint_support = true; + return 1; +} +__setup("xint", xint_setup); + +static bool has_xint_support(const struct arm64_cpu_capabilities *entry, int __unused) +{ + return is_xint_support; +} +#endif + +#ifdef CONFIG_ACTLR_XCALL_XINT #define AIDR_ELx_XCALL_SHIFT 32 #define AIDR_ELx_XCALL (UL(1) << AIDR_ELx_XCALL_SHIFT) @@ -2477,40 +2510,6 @@ static bool is_arch_xcall_xint_support(void) return false; } -static int __init xcall_setup(char *str) -{ - if (!is_arch_xcall_xint_support()) - static_branch_enable(&xcall_enable); - - return 1; -} -__setup("xcall", xcall_setup); - -static bool has_xcall_support(const struct arm64_cpu_capabilities *entry, int __unused) -{ - return static_key_enabled(&xcall_enable); -} 
-#endif - -#ifdef CONFIG_FAST_IRQ -bool is_xint_support; -static int __init xint_setup(char *str) -{ - if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS)) - return 1; - - is_xint_support = true; - return 1; -} -__setup("xint", xint_setup); - -static bool has_xint_support(const struct arm64_cpu_capabilities *entry, int __unused) -{ - return is_xint_support; -} -#endif - -#ifdef CONFIG_ACTLR_XCALL_XINT static bool has_arch_xcall_xint_support(const struct arm64_cpu_capabilities *entry, int scope) { return is_arch_xcall_xint_support(); @@ -2554,14 +2553,14 @@ static void cpu_enable_arch_xcall_xint(const struct arm64_cpu_capabilities *__un el = read_sysreg(CurrentEL); if (el == CurrentEL_EL2) { actlr_el2 = read_sysreg(actlr_el2); - actlr_el2 |= ACTLR_ELx_XINT; + actlr_el2 |= (ACTLR_ELx_XINT | ACTLR_ELx_XCALL); write_sysreg(actlr_el2, actlr_el2); isb(); actlr_el2 = read_sysreg(actlr_el2); pr_info("actlr_el2: %llx, cpu:%d\n", actlr_el2, cpu); } else { actlr_el1 = read_sysreg(actlr_el1); - actlr_el1 |= ACTLR_ELx_XINT; + actlr_el1 |= (ACTLR_ELx_XINT | ACTLR_ELx_XCALL); write_sysreg(actlr_el1, actlr_el1); isb(); actlr_el1 = read_sysreg(actlr_el1); diff --git a/arch/arm64/kernel/xcall/entry.S b/arch/arm64/kernel/xcall/entry.S index 7728f32ee962..7b75e8651a2a 100644 --- a/arch/arm64/kernel/xcall/entry.S +++ b/arch/arm64/kernel/xcall/entry.S @@ -151,13 +151,6 @@ alternative_else_nop_endif sb .endm /* .macro hw_xcal_restore_base_regs */ -SYM_CODE_START(no_xcall_entry) - kernel_entry 0, 64 - mov x0, sp - bl el0t_64_sync_handler - b ret_to_user -SYM_CODE_END(no_xcall_entry) - SYM_CODE_START(xcall_entry) hw_xcall_save_base_regs mov x0, sp diff --git a/arch/arm64/kernel/xcall/xcall.c b/arch/arm64/kernel/xcall/xcall.c index d8eaec7e4637..31072c0402f4 100644 --- a/arch/arm64/kernel/xcall/xcall.c +++ b/arch/arm64/kernel/xcall/xcall.c @@ -6,7 +6,6 @@ */ #include <linux/bitmap.h> -#include <linux/percpu.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/xcall.h> @@ -25,45 
+24,8 @@ static inline int sw_xcall_init_task(struct task_struct *p, struct task_struct * return 0; } -#ifdef CONFIG_ACTLR_XCALL_XINT -static const void *default_syscall_table[__NR_syscalls + 1] = { - [0 ... __NR_syscalls] = no_xcall_entry, -}; - -asmlinkage DEFINE_PER_CPU(void *, __cpu_xcall_entry) = default_syscall_table; -static inline int hw_xcall_init_task(struct task_struct *p, struct task_struct *orig) -{ - struct hw_xcall_info *p_xinfo, *orig_xinfo; - - p->xinfo = kzalloc(sizeof(struct hw_xcall_info), GFP_KERNEL); - if (!p->xinfo) - return -ENOMEM; - - p_xinfo = TASK_HW_XINFO(p); - spin_lock_init(&p_xinfo->lock); - - if (!orig->xinfo) { - memcpy(p->xinfo, default_syscall_table, XCALL_ENTRY_SIZE); - atomic_set(&p_xinfo->xcall_scno_count, 0); - } else { - orig_xinfo = TASK_HW_XINFO(orig); - spin_lock(&orig_xinfo->lock); - memcpy(p->xinfo, orig->xinfo, XCALL_ENTRY_SIZE); - atomic_set(&p_xinfo->xcall_scno_count, - atomic_read(&orig_xinfo->xcall_scno_count)); - spin_unlock(&orig_xinfo->lock); - } - - return 0; -} -#endif - int xcall_init_task(struct task_struct *p, struct task_struct *orig) { -#ifdef CONFIG_ACTLR_XCALL_XINT - if (system_uses_xcall_xint()) - return hw_xcall_init_task(p, orig); -#endif if (static_branch_unlikely(&xcall_enable)) return sw_xcall_init_task(p, orig); @@ -72,6 +34,6 @@ int xcall_init_task(struct task_struct *p, struct task_struct *orig) void xcall_task_free(struct task_struct *p) { - if (system_uses_xcall_xint() || static_branch_unlikely(&xcall_enable)) + if (static_branch_unlikely(&xcall_enable)) kfree(p->xinfo); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 98af8358296a..14eaa56f2e9d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -17,6 +17,7 @@ #include <linux/printk.h> #include <linux/uaccess.h> +#include <asm/actlr.h> #include <asm/cacheflush.h> #include <asm/cputype.h> #include <asm/debug-monitors.h> diff --git a/fs/proc/proc_xcall.c b/fs/proc/proc_xcall.c index 
5a417bc7cb0a..5f45d0799b33 100644 --- a/fs/proc/proc_xcall.c +++ b/fs/proc/proc_xcall.c @@ -4,57 +4,11 @@ * * Copyright (C) 2025 Huawei Ltd. */ -#include <linux/cpufeature.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <asm/xcall.h> #include "internal.h" -#ifdef CONFIG_ACTLR_XCALL_XINT -static void proc_hw_xcall_show(struct task_struct *p, struct seq_file *m) -{ - struct hw_xcall_info *hw_xinfo = TASK_HW_XINFO(p); - unsigned int i, start = 0, end = 0; - bool in_range = false; - - if (!hw_xinfo) - return; - - for (i = 0; i < __NR_syscalls; i++) { - bool scno_xcall_enable = is_xcall_entry(hw_xinfo, i); - - if (scno_xcall_enable && !in_range) { - in_range = true; - start = i; - } - - if ((!scno_xcall_enable || i == __NR_syscalls - 1) && in_range) { - in_range = false; - end = scno_xcall_enable ? i : i - 1; - if (i == start + 1) - seq_printf(m, "%u,", start); - else - seq_printf(m, "%u-%u,", start, end); - } - } - seq_puts(m, "\n"); -} - -static int proc_set_hw_xcall(struct task_struct *p, unsigned int sc_no, - bool is_clear) -{ - struct hw_xcall_info *hw_xinfo = TASK_HW_XINFO(p); - - if (!is_clear) - return set_hw_xcall_entry(hw_xinfo, sc_no, true); - - if (is_clear) - return set_hw_xcall_entry(hw_xinfo, sc_no, false); - - return -EINVAL; -} -#endif - static int xcall_show(struct seq_file *m, void *v) { struct inode *inode = m->private; @@ -62,20 +16,13 @@ static int xcall_show(struct seq_file *m, void *v) unsigned int rs, re; struct xcall_info *xinfo; - if (!system_uses_xcall_xint() && !static_key_enabled(&xcall_enable)) + if (!static_key_enabled(&xcall_enable)) return -EACCES; p = get_proc_task(inode); if (!p) return -ESRCH; -#ifdef CONFIG_ACTLR_XCALL_XINT - if (system_uses_xcall_xint()) { - proc_hw_xcall_show(p, m); - goto out; - } -#endif - xinfo = TASK_XINFO(p); if (!xinfo) goto out; @@ -124,7 +71,7 @@ static ssize_t xcall_write(struct file *file, const char __user *buf, int is_clear = 0; struct xcall_info *xinfo; - if (!system_uses_xcall_xint() 
&& !static_key_enabled(&xcall_enable)) + if (!static_key_enabled(&xcall_enable)) return -EACCES; memset(buffer, 0, sizeof(buffer)); @@ -148,13 +95,6 @@ static ssize_t xcall_write(struct file *file, const char __user *buf, goto out; } -#ifdef CONFIG_ACTLR_XCALL_XINT - if (system_uses_xcall_xint()) { - ret = proc_set_hw_xcall(p, sc_no, is_clear); - goto out; - } -#endif - xinfo = TASK_XINFO(p); if (!is_clear && !test_bit(sc_no, xinfo->xcall_enable)) ret = xcall_enable_one(xinfo, sc_no); -- 2.34.1