
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IBV2E4

--------------------------------

The arm64 svc exception handling path has to take care of auxiliary
work such as debug/trace as well as core work such as scheduling and
signal delivery, which makes it fairly long. For short syscalls such
as lseek() and getpid(), the syscall function itself therefore
accounts for only a small fraction of the executed instructions, so
such syscalls run inefficiently.

xcall can be regarded as a fast svc exception handling path that
performs only the context save/restore and the syscall function
itself. In that sense it is a fast syscall mechanism that sits between
the vDSO and a normal syscall. An svc instruction with the immediate
0xffff is used to carry the xcall request, and the synchronous
exception entry of the arm64 vectors uses the ESR_EL1 register to
distinguish an xcall from a normal syscall.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/arm64/Kconfig.turbo             |   1 +
 arch/arm64/include/asm/cpufeature.h  |   8 --
 arch/arm64/include/asm/exception.h   |   5 +
 arch/arm64/include/asm/mmu_context.h |   7 +
 arch/arm64/include/asm/xcall.h       |  76 +++++++++++
 arch/arm64/kernel/Makefile           |   2 +-
 arch/arm64/kernel/cpufeature.c       |  32 +++--
 arch/arm64/kernel/entry-common.c     |   2 +-
 arch/arm64/kernel/entry.S            |   4 +-
 arch/arm64/kernel/xcall.c            |  35 -----
 arch/arm64/kernel/xcall/Makefile     |   2 +
 arch/arm64/kernel/xcall/entry.S      | 195 +++++++++++++++++++++++++++
 arch/arm64/kernel/xcall/xcall.c      |  77 +++++++++++
 fs/proc/proc_xcall.c                 |  65 ++++++++-
 14 files changed, 451 insertions(+), 60 deletions(-)
 delete mode 100644 arch/arm64/kernel/xcall.c
 create mode 100644 arch/arm64/kernel/xcall/Makefile
 create mode 100644 arch/arm64/kernel/xcall/entry.S
 create mode 100644 arch/arm64/kernel/xcall/xcall.c

diff --git a/arch/arm64/Kconfig.turbo b/arch/arm64/Kconfig.turbo
index 76ad4ea567c3..c4a8e4e889aa 100644
--- a/arch/arm64/Kconfig.turbo
+++ b/arch/arm64/Kconfig.turbo
@@ -63,6 +63,7 @@ config SECURITY_FEATURE_BYPASS
 
 config ACTLR_XCALL_XINT
 	bool "Hardware XCALL and Xint support"
+	depends on FAST_SYSCALL
 	default n
 	help
 	  Use the 0x600 as the offset to the exception vector base address for
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 00c3caddac98..859236125cb1 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -831,13 +831,11 @@ static inline bool system_has_full_ptr_auth(void)
 	return system_supports_address_auth() && system_supports_generic_auth();
 }
 
-#ifdef CONFIG_ACTLR_XCALL_XINT
 static __always_inline bool system_uses_xcall_xint(void)
 {
 	return IS_ENABLED(CONFIG_ACTLR_XCALL_XINT) &&
 	       cpus_have_const_cap(ARM64_HAS_HW_XCALL_XINT);
 }
-#endif
 
 static __always_inline bool system_uses_irq_prio_masking(void)
 {
@@ -889,12 +887,6 @@ static inline bool system_supports_haft(void)
 		cpus_have_final_cap(ARM64_HAFT);
 }
 
-static __always_inline bool system_supports_xcall(void)
-{
-	return IS_ENABLED(CONFIG_FAST_SYSCALL) &&
-	       cpus_have_const_cap(ARM64_HAS_XCALL);
-}
-
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
 
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index ad688e157c9b..d69f0e6d53f8 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -77,4 +77,9 @@ void
do_serror(struct pt_regs *regs, unsigned long esr); void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags); void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far); + +#ifdef CONFIG_ACTLR_XCALL_XINT +asmlinkage void el0t_64_xint_handler(struct pt_regs *regs); +asmlinkage void el0t_64_xcall_handler(struct pt_regs *regs); +#endif #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index a6fb325424e7..39595fa03491 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -24,6 +24,9 @@ #include <asm/cputype.h> #include <asm/sysreg.h> #include <asm/tlbflush.h> +#ifdef CONFIG_ACTLR_XCALL_XINT +#include <asm/xcall.h> +#endif extern bool rodata_full; @@ -264,6 +267,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, if (prev != next) __switch_mm(next); +#ifdef CONFIG_ACTLR_XCALL_XINT + cpu_switch_xcall_entry(tsk); +#endif + /* * Update the saved TTBR0_EL1 of the scheduled-in task as the previous * value may have not been initialised yet (activate_mm caller) or the diff --git a/arch/arm64/include/asm/xcall.h b/arch/arm64/include/asm/xcall.h index d45c32eef7f8..5765a96eed53 100644 --- a/arch/arm64/include/asm/xcall.h +++ b/arch/arm64/include/asm/xcall.h @@ -2,10 +2,15 @@ #ifndef __ASM_XCALL_H #define __ASM_XCALL_H +#include <linux/atomic.h> #include <linux/jump_label.h> +#include <linux/percpu.h> #include <linux/sched.h> #include <linux/types.h> +#include <asm/actlr.h> +#include <asm/cpufeature.h> + DECLARE_STATIC_KEY_FALSE(xcall_enable); struct xcall_info { @@ -17,4 +22,75 @@ struct xcall_info { int xcall_init_task(struct task_struct *p, struct task_struct *orig); void xcall_task_free(struct task_struct *p); + +#ifdef CONFIG_ACTLR_XCALL_XINT +struct hw_xcall_info { + /* Must be first! */ + void *xcall_entry[__NR_syscalls + 1]; + atomic_t xcall_scno_count; + /* keep xcall_entry and xcall scno count consistent */ + spinlock_t lock; +}; + +#define TASK_HW_XINFO(p) ((struct hw_xcall_info *)p->xinfo) +#define XCALL_ENTRY_SIZE (sizeof(unsigned long) * (__NR_syscalls + 1)) + +DECLARE_PER_CPU(void *, __cpu_xcall_entry); +extern void xcall_entry(void); +extern void no_xcall_entry(void); + +static inline bool is_xcall_entry(struct hw_xcall_info *xinfo, unsigned int sc_no) +{ + return xinfo->xcall_entry[sc_no] == xcall_entry; +} + +static inline int set_hw_xcall_entry(struct hw_xcall_info *xinfo, + unsigned int sc_no, bool enable) +{ + spin_lock(&xinfo->lock); + if (enable && !is_xcall_entry(xinfo, sc_no)) { + xinfo->xcall_entry[sc_no] = xcall_entry; + atomic_inc(&xinfo->xcall_scno_count); + } + + if (!enable && is_xcall_entry(xinfo, sc_no)) { + xinfo->xcall_entry[sc_no] = no_xcall_entry; + atomic_dec(&xinfo->xcall_scno_count); + } + spin_unlock(&xinfo->lock); + + return 0; +} + +static inline void cpu_set_arch_xcall(bool enable) +{ + u64 el = read_sysreg(CurrentEL); + u64 val; + + if (el == CurrentEL_EL2) { + val = read_sysreg(actlr_el2); + val = enable ? (val | ACTLR_ELx_XCALL) : (val & ~ACTLR_ELx_XCALL); + write_sysreg(val, actlr_el2); + } else { + val = read_sysreg(actlr_el1); + val = enable ? 
(val | ACTLR_ELx_XCALL) : (val & ~ACTLR_ELx_XCALL); + write_sysreg(val, actlr_el1); + } +} + +static inline void cpu_switch_xcall_entry(struct task_struct *tsk) +{ + struct hw_xcall_info *xinfo = tsk->xinfo; + + if (!system_uses_xcall_xint() || !tsk->xinfo) + return; + + if (unlikely(atomic_read(&xinfo->xcall_scno_count) > 0)) { + __this_cpu_write(__cpu_xcall_entry, xinfo->xcall_entry); + cpu_set_arch_xcall(true); + } else + cpu_set_arch_xcall(false); +} +#endif /* CONFIG_ACTLR_XCALL_XINT */ + #endif /*__ASM_XCALL_H*/ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 5fe59ab95979..20b8c6466965 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -79,7 +79,7 @@ obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso-wrap.o obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o obj-$(CONFIG_ARM64_ILP32) += vdso-ilp32/ -obj-$(CONFIG_FAST_SYSCALL) += xcall.o +obj-$(CONFIG_FAST_SYSCALL) += xcall/ obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o obj-$(CONFIG_IPI_AS_NMI) += ipi_nmi.o obj-$(CONFIG_HISI_VIRTCCA_GUEST) += virtcca_cvm_guest.o virtcca_cvm_tsi.o diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index cc6ba1a7736e..b26bdaae6e2f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2433,16 +2433,32 @@ static void mpam_extra_caps(void) #include <asm/xcall.h> DEFINE_STATIC_KEY_FALSE(xcall_enable); +static bool is_arch_xcall_xint_support(void) +{ + /* List of CPUs that support Xcall/Xint */ + static const struct midr_range xcall_xint_cpus[] = { + MIDR_ALL_VERSIONS(MIDR_HISI_HIP12), + { /* sentinel */ } + }; + + if (is_midr_in_range_list(read_cpuid_id(), xcall_xint_cpus)) + return true; + + return false; +} + static int __init xcall_setup(char *str) { - static_branch_enable(&xcall_enable); + if (!is_arch_xcall_xint_support()) + static_branch_enable(&xcall_enable); + return 1; } __setup("xcall", xcall_setup); static bool has_xcall_support(const struct arm64_cpu_capabilities *entry, int __unused) { - return static_branch_unlikely(&xcall_enable); + return static_key_enabled(&xcall_enable); } #endif @@ -2467,13 +2483,7 @@ static bool has_xint_support(const struct arm64_cpu_capabilities *entry, int __u #ifdef CONFIG_ACTLR_XCALL_XINT static bool has_arch_xcall_xint_support(const struct arm64_cpu_capabilities *entry, int scope) { - /* List of CPUs that support Xcall/Xint */ - static const struct midr_range xcall_xint_cpus[] = { - MIDR_ALL_VERSIONS(MIDR_HISI_HIP12), - { /* sentinel */ } - }; - - return is_midr_in_range_list(read_cpuid_id(), xcall_xint_cpus); + return is_arch_xcall_xint_support(); } static void enable_xcall_xint_vectors(void) @@ -2517,14 +2527,14 @@ static void cpu_enable_arch_xcall_xint(const struct arm64_cpu_capabilities *__un */ write_sysreg_s(read_sysreg_s(SYS_HCR_EL2) | HCR_TACR, SYS_HCR_EL2); actlr_el2 = read_sysreg(actlr_el2); - actlr_el2 |= (ACTLR_ELx_XINT | ACTLR_ELx_XCALL); + actlr_el2 |= ACTLR_ELx_XINT; write_sysreg(actlr_el2, actlr_el2); isb(); actlr_el2 = read_sysreg(actlr_el2); pr_info("actlr_el2: %llx, cpu:%d\n", actlr_el2, cpu); } else { actlr_el1 = read_sysreg(actlr_el1); - actlr_el1 |= (ACTLR_ELx_XINT | ACTLR_ELx_XCALL); + actlr_el1 |= ACTLR_ELx_XINT; write_sysreg(actlr_el1, actlr_el1); isb(); actlr_el1 = read_sysreg(actlr_el1); diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index a90231346751..1e8171c1efe7 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -1054,7 +1054,7 @@ UNHANDLED(el0t, 32, error) 
#ifdef CONFIG_ACTLR_XCALL_XINT asmlinkage void noinstr el0t_64_xcall_handler(struct pt_regs *regs) { - el0_svc(regs); + el0_xcall(regs); } asmlinkage void noinstr el0t_64_xint_handler(struct pt_regs *regs) { diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 3d0e5cc1d904..62b59d67b27e 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -581,6 +581,8 @@ SYM_CODE_START(vectors) SYM_CODE_END(vectors) #ifdef CONFIG_ACTLR_XCALL_XINT +#include "xcall/entry.S" + .align 11 SYM_CODE_START(vectors_xcall_xint) kernel_ventry 1, t, 64, sync // Synchronous EL1t @@ -598,7 +600,7 @@ SYM_CODE_START(vectors_xcall_xint) kernel_ventry 0, t, 64, fiq // FIQ 64-bit EL0 kernel_ventry 0, t, 64, error // Error 64-bit EL0 - kernel_ventry 0, t, 64, xcall // XCALL synchronous 64-bit EL0 + xcall_ventry // XCALL synchronous 64-bit EL0 kernel_ventry 0, t, 64, xint // XINT 64-bit EL0 kernel_ventry 0, t, 32, fiq // FIQ 32-bit EL0 kernel_ventry 0, t, 32, error // Error 32-bit EL0 diff --git a/arch/arm64/kernel/xcall.c b/arch/arm64/kernel/xcall.c deleted file mode 100644 index f6a507a3aed0..000000000000 --- a/arch/arm64/kernel/xcall.c +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * xcall related code - * - * Copyright (C) 2025 Huawei Ltd. - */ - -#include <linux/bitmap.h> -#include <linux/slab.h> -#include <asm/xcall.h> - -int xcall_init_task(struct task_struct *p, struct task_struct *orig) -{ - if (!static_branch_unlikely(&xcall_enable)) - return 0; - - p->xinfo = kzalloc(sizeof(struct xcall_info), GFP_KERNEL); - if (!p->xinfo) - return -ENOMEM; - - if (orig->xinfo) { - bitmap_copy(TASK_XINFO(p)->xcall_enable, TASK_XINFO(orig)->xcall_enable, - __NR_syscalls); - } - - return 0; -} - -void xcall_task_free(struct task_struct *p) -{ - if (!static_branch_unlikely(&xcall_enable)) - return; - - kfree(p->xinfo); -} diff --git a/arch/arm64/kernel/xcall/Makefile b/arch/arm64/kernel/xcall/Makefile new file mode 100644 index 000000000000..0168bd190793 --- /dev/null +++ b/arch/arm64/kernel/xcall/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += xcall.o diff --git a/arch/arm64/kernel/xcall/entry.S b/arch/arm64/kernel/xcall/entry.S new file mode 100644 index 000000000000..401be46f4fc2 --- /dev/null +++ b/arch/arm64/kernel/xcall/entry.S @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Low-level exception handling code + * + * Copyright (C) 2012 ARM Ltd. + * Authors: Catalin Marinas <catalin.marinas@arm.com> + * Will Deacon <will.deacon@arm.com> + */ + + .macro hw_xcall_save_base_regs + stp x0, x1, [sp, #16 * 0] // save x0~x29 + stp x2, x3, [sp, #16 * 1] + stp x4, x5, [sp, #16 * 2] + stp x6, x7, [sp, #16 * 3] + stp x8, x9, [sp, #16 * 4] + stp x10, x11, [sp, #16 * 5] + stp x12, x13, [sp, #16 * 6] + stp x14, x15, [sp, #16 * 7] + stp x16, x17, [sp, #16 * 8] + stp x18, x19, [sp, #16 * 9] + stp x20, x21, [sp, #16 * 10] + stp x22, x23, [sp, #16 * 11] + stp x24, x25, [sp, #16 * 12] + stp x26, x27, [sp, #16 * 13] + stp x28, x29, [sp, #16 * 14] + + clear_gp_regs // clear x0~x29 + mrs x21, sp_el0 + ldr_this_cpu tsk, __entry_task, x20 + msr sp_el0, tsk + +#ifdef CONFIG_ARM64_PTR_AUTH +alternative_if ARM64_HAS_ADDRESS_AUTH + /* + * Enable IA for in-kernel PAC if the task had it disabled. Although + * this could be implemented with an unconditional MRS which would avoid + * a load, this was measured to be slower on Cortex-A75 and Cortex-A76. + * + * Install the kernel IA key only if IA was enabled in the task. 
If IA + * was disabled on kernel exit then we would have left the kernel IA + * installed so there is no need to install it again. + */ + ldr x0, [tsk, THREAD_SCTLR_USER] + + tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f + __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23 + b 2f +1: + mrs x0, sctlr_el1 + orr x0, x0, SCTLR_ELx_ENIA + msr sctlr_el1, x0 +2: +alternative_else_nop_endif +#endif + +alternative_if ARM64_HAS_ADDRESS_AUTH + isb +alternative_else_nop_endif + + mrs x22, elr_el1 + mrs x23, spsr_el1 + stp lr, x21, [sp, #S_LR] // save LR,USER SP + + stp xzr, xzr, [sp, #S_STACKFRAME] + add x29, sp, #S_STACKFRAME // calc FP + + stp x22, x23, [sp, #S_PC] // save ELR,SPSR + + mov w21, #NO_SYSCALL + str w21, [sp, #S_SYSCALLNO] + + +#ifdef CONFIG_ARM64_PSEUDO_NMI +alternative_if_not ARM64_HAS_GIC_PRIO_MASKING + b .Lskip_pmr_save\@ +alternative_else_nop_endif + + mrs_s x20, SYS_ICC_PMR_EL1 + str x20, [sp, #S_PMR_SAVE] + mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET + msr_s SYS_ICC_PMR_EL1, x20 + +.Lskip_pmr_save\@: +#endif + .endm /* .macro hw_xcall_save_base_regs */ + + .macro hw_xcal_restore_base_regs +#ifdef CONFIG_ARM64_PSEUDO_NMI +alternative_if_not ARM64_HAS_GIC_PRIO_MASKING + b .Lskip_pmr_restore\@ +alternative_else_nop_endif + + ldr x20, [sp, #S_PMR_SAVE] + msr_s SYS_ICC_PMR_EL1, x20 + + /* Ensure priority change is seen by redistributor */ +alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC + dsb sy +alternative_else_nop_endif + +.Lskip_pmr_restore\@: +#endif + + ldp x21, x22, [sp, #S_PC] + ldr x23, [sp, #S_SP] + msr sp_el0, x23 // restore USER SP + +#ifdef CONFIG_ARM64_PTR_AUTH +alternative_if ARM64_HAS_ADDRESS_AUTH + /* + * IA was enabled for in-kernel PAC. Disable it now if needed, or + * alternatively install the user's IA. All other per-task keys and + * SCTLR bits were updated on task switch. + * + * No kernel C function calls after this. + */ + ldr x0, [tsk, THREAD_SCTLR_USER] + tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f + __ptrauth_keys_install_user tsk, x0, x1, x2 + b 2f +1: + mrs x0, sctlr_el1 + bic x0, x0, SCTLR_ELx_ENIA + msr sctlr_el1, x0 +2: +alternative_else_nop_endif +#endif + + msr elr_el1, x21 // restore ELR + msr spsr_el1, x22 // restore SPSR + + ldp x0, x1, [sp, #16 * 0] // restore x0~x29 + ldp x2, x3, [sp, #16 * 1] + ldp x4, x5, [sp, #16 * 2] + ldp x6, x7, [sp, #16 * 3] + ldp x8, x9, [sp, #16 * 4] + ldp x10, x11, [sp, #16 * 5] + ldp x12, x13, [sp, #16 * 6] + ldp x14, x15, [sp, #16 * 7] + ldp x16, x17, [sp, #16 * 8] + ldp x18, x19, [sp, #16 * 9] + ldp x20, x21, [sp, #16 * 10] + ldp x22, x23, [sp, #16 * 11] + ldp x24, x25, [sp, #16 * 12] + ldp x26, x27, [sp, #16 * 13] + ldp x28, x29, [sp, #16 * 14] + + ldr lr, [sp, #S_LR] + add sp, sp, #PT_REGS_SIZE // restore sp + eret + sb + .endm /* .macro hw_xcal_restore_base_regs */ + +SYM_CODE_START(no_xcall_entry) + ldp x20, x21, [sp, #0] + kernel_entry 0, 64 + mov x0, sp + bl el0t_64_sync_handler + b ret_to_user +SYM_CODE_END(no_xcall_entry) + +SYM_CODE_START(xcall_entry) + ldp x20, x21, [sp, #0] + hw_xcall_save_base_regs + mov x0, sp + bl el0t_64_xcall_handler + hw_xcal_restore_base_regs +SYM_CODE_END(xcall_entry) + +SYM_CODE_START_LOCAL(el0t_64_hw_xcall) + stp x20, x21, [sp, #0] + ldr_this_cpu x21, __cpu_xcall_entry, x20 + mov x20, __NR_syscalls + /* x8 >= __NR_syscalls */ + cmp x8, __NR_syscalls + csel x20, x8, x20, lt + ldr x21, [x21, x20, lsl #3] + br x21 +SYM_CODE_END(el0t_64_hw_xcall) + + .macro xcall_ventry + .align 7 +.Lventry_start\@: + /* + * This must be the first instruction of the EL0 vector entries. 
It is + * skipped by the trampoline vectors, to trigger the cleanup. + */ + b .Lskip_tramp_vectors_cleanup\@ + mrs x30, tpidrro_el0 + msr tpidrro_el0, xzr +.Lskip_tramp_vectors_cleanup\@: + sub sp, sp, #PT_REGS_SIZE + b el0t_64_hw_xcall +.org .Lventry_start\@ + 128 // Did we overflow the ventry slot? + .endm diff --git a/arch/arm64/kernel/xcall/xcall.c b/arch/arm64/kernel/xcall/xcall.c new file mode 100644 index 000000000000..d8eaec7e4637 --- /dev/null +++ b/arch/arm64/kernel/xcall/xcall.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * xcall related code + * + * Copyright (C) 2025 Huawei Ltd. + */ + +#include <linux/bitmap.h> +#include <linux/percpu.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <asm/xcall.h> + +static inline int sw_xcall_init_task(struct task_struct *p, struct task_struct *orig) +{ + p->xinfo = kzalloc(sizeof(struct xcall_info), GFP_KERNEL); + if (!p->xinfo) + return -ENOMEM; + + if (orig->xinfo) { + bitmap_copy(TASK_XINFO(p)->xcall_enable, TASK_XINFO(orig)->xcall_enable, + __NR_syscalls); + } + + return 0; +} + +#ifdef CONFIG_ACTLR_XCALL_XINT +static const void *default_syscall_table[__NR_syscalls + 1] = { + [0 ... __NR_syscalls] = no_xcall_entry, +}; + +asmlinkage DEFINE_PER_CPU(void *, __cpu_xcall_entry) = default_syscall_table; +static inline int hw_xcall_init_task(struct task_struct *p, struct task_struct *orig) +{ + struct hw_xcall_info *p_xinfo, *orig_xinfo; + + p->xinfo = kzalloc(sizeof(struct hw_xcall_info), GFP_KERNEL); + if (!p->xinfo) + return -ENOMEM; + + p_xinfo = TASK_HW_XINFO(p); + spin_lock_init(&p_xinfo->lock); + + if (!orig->xinfo) { + memcpy(p->xinfo, default_syscall_table, XCALL_ENTRY_SIZE); + atomic_set(&p_xinfo->xcall_scno_count, 0); + } else { + orig_xinfo = TASK_HW_XINFO(orig); + spin_lock(&orig_xinfo->lock); + memcpy(p->xinfo, orig->xinfo, XCALL_ENTRY_SIZE); + atomic_set(&p_xinfo->xcall_scno_count, + atomic_read(&orig_xinfo->xcall_scno_count)); + spin_unlock(&orig_xinfo->lock); + } + + return 0; +} +#endif + +int xcall_init_task(struct task_struct *p, struct task_struct *orig) +{ +#ifdef CONFIG_ACTLR_XCALL_XINT + if (system_uses_xcall_xint()) + return hw_xcall_init_task(p, orig); +#endif + if (static_branch_unlikely(&xcall_enable)) + return sw_xcall_init_task(p, orig); + + return 0; +} + +void xcall_task_free(struct task_struct *p) +{ + if (system_uses_xcall_xint() || static_branch_unlikely(&xcall_enable)) + kfree(p->xinfo); +} diff --git a/fs/proc/proc_xcall.c b/fs/proc/proc_xcall.c index 97504bc60823..5a417bc7cb0a 100644 --- a/fs/proc/proc_xcall.c +++ b/fs/proc/proc_xcall.c @@ -10,6 +10,51 @@ #include <asm/xcall.h> #include "internal.h" +#ifdef CONFIG_ACTLR_XCALL_XINT +static void proc_hw_xcall_show(struct task_struct *p, struct seq_file *m) +{ + struct hw_xcall_info *hw_xinfo = TASK_HW_XINFO(p); + unsigned int i, start = 0, end = 0; + bool in_range = false; + + if (!hw_xinfo) + return; + + for (i = 0; i < __NR_syscalls; i++) { + bool scno_xcall_enable = is_xcall_entry(hw_xinfo, i); + + if (scno_xcall_enable && !in_range) { + in_range = true; + start = i; + } + + if ((!scno_xcall_enable || i == __NR_syscalls - 1) && in_range) { + in_range = false; + end = scno_xcall_enable ? 
i : i - 1; + if (i == start + 1) + seq_printf(m, "%u,", start); + else + seq_printf(m, "%u-%u,", start, end); + } + } + seq_puts(m, "\n"); +} + +static int proc_set_hw_xcall(struct task_struct *p, unsigned int sc_no, + bool is_clear) +{ + struct hw_xcall_info *hw_xinfo = TASK_HW_XINFO(p); + + if (!is_clear) + return set_hw_xcall_entry(hw_xinfo, sc_no, true); + + if (is_clear) + return set_hw_xcall_entry(hw_xinfo, sc_no, false); + + return -EINVAL; +} +#endif + static int xcall_show(struct seq_file *m, void *v) { struct inode *inode = m->private; @@ -17,13 +62,20 @@ static int xcall_show(struct seq_file *m, void *v) unsigned int rs, re; struct xcall_info *xinfo; - if (!system_supports_xcall()) + if (!system_uses_xcall_xint() && !static_key_enabled(&xcall_enable)) return -EACCES; p = get_proc_task(inode); if (!p) return -ESRCH; +#ifdef CONFIG_ACTLR_XCALL_XINT + if (system_uses_xcall_xint()) { + proc_hw_xcall_show(p, m); + goto out; + } +#endif + xinfo = TASK_XINFO(p); if (!xinfo) goto out; @@ -72,7 +124,7 @@ static ssize_t xcall_write(struct file *file, const char __user *buf, int is_clear = 0; struct xcall_info *xinfo; - if (!system_supports_xcall()) + if (!system_uses_xcall_xint() && !static_key_enabled(&xcall_enable)) return -EACCES; memset(buffer, 0, sizeof(buffer)); @@ -83,7 +135,6 @@ static ssize_t xcall_write(struct file *file, const char __user *buf, if (!p || !p->xinfo) return -ESRCH; - xinfo = TASK_XINFO(p); if (buffer[0] == '!') is_clear = 1; @@ -97,6 +148,14 @@ static ssize_t xcall_write(struct file *file, const char __user *buf, goto out; } +#ifdef CONFIG_ACTLR_XCALL_XINT + if (system_uses_xcall_xint()) { + ret = proc_set_hw_xcall(p, sc_no, is_clear); + goto out; + } +#endif + + xinfo = TASK_XINFO(p); if (!is_clear && !test_bit(sc_no, xinfo->xcall_enable)) ret = xcall_enable_one(xinfo, sc_no); else if (is_clear && test_bit(sc_no, xinfo->xcall_enable)) -- 2.34.1
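
Note (not part of the patch): the commit message states that an svc with
the immediate 0xffff marks an xcall and that ESR_EL1 is read at the
synchronous exception entry to tell it apart from a normal syscall. As a
rough illustration of the calling side, here is a minimal user-space
sketch. The wrapper name xcall_getpid(), the assumption that the caller
issues the marked svc directly (rather than via a patched libc), and the
assumption that the syscall was first enabled through the per-task proc
interface added in fs/proc/proc_xcall.c are all illustrative only.

/*
 * Illustration only: invoke getpid() through the xcall path by issuing
 * svc with immediate 0xffff instead of 0. __NR_getpid is 172 on arm64;
 * the kernel reads the immediate back from ESR_EL1 at the synchronous
 * exception entry and takes the fast path for enabled syscalls.
 */
static inline long xcall_getpid(void)
{
	register long scno asm("x8") = 172;	/* __NR_getpid */
	register long ret asm("x0");

	asm volatile("svc #0xffff"		/* 0xffff marks an xcall */
		     : "=r" (ret)
		     : "r" (scno)
		     : "memory");

	return ret;
}

Apart from the immediate, the calling convention is unchanged, so a
caller that does not have the syscall enabled for xcall simply falls
back to the normal svc handling path.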