hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB4PY5
----------------------------------------
The svc exception handling path on ARM64, which includes auxiliary work for debug/trace as well as core mitigations such as KPTI, has been identified as overly "lengthy".
This overhead is particularly noticeable in short syscalls such as lseek() and getpid(), where the syscall function itself accounts for only a small percentage of the total instructions executed.
To address this, we introduce xcall, a fast svc exception handling path that performs only the necessary work: security mitigations, context saving, and context restore.
This approach can be seen as a high-speed syscall processing mechanism that bridges the gap between the vDSO and traditional syscalls.
We've implemented a per-task bitmap to enable or disable xcall for specific syscalls.
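As a rough C sketch (illustrative only, not part of the patch, which performs the equivalent test in assembly via the check_xcall_pre_kernel_entry macro before kernel_entry), the per-task decision amounts to:

static inline bool xcall_enabled(struct task_struct *p, unsigned int scno)
{
	/* xcall_enable is the per-task bitmap allocated in copy_process() */
	return p->xcall_enable && scno < __NR_syscalls &&
	       test_bit(scno, p->xcall_enable);
}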
Users can enable xcall for a given syscall number with the following command:
echo $syscall_nr > /proc/$PID/xcall
To disable a syscall, use:
echo !$syscall_nr > /proc/$PID/xcall
The set of currently enabled syscalls can be viewed with:
cat /proc/$PID/xcall
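For example, to enable the fast path for getpid() in the current shell and inspect the result (assuming getpid() is syscall number 172 on arm64, per asm-generic/unistd.h; substitute the correct number for your kernel):
echo 172 > /proc/$$/xcall
cat /proc/$$/xcall
The output reports the enabled count and the enabled syscall numbers, e.g. "Enabled Total[1/<__NR_syscalls>]:172,".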
Finally, we've added a kernel boot parameter to control the xcall feature.
To enable xcall, add "xcall" to the kernel boot command line.
By default, xcall is disabled.
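Once booted with the parameter on supported hardware, detection can be sanity-checked from dmesg; given the cpufeature entry added below, a line similar to the following is expected (the exact wording is illustrative):
dmesg | grep -i xcall
# CPU features: detected: Xcall Support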
Signed-off-by: Yipeng Zou <zouyipeng@huawei.com>
---
 arch/arm64/Kconfig                     |  17 +++
 arch/arm64/configs/openeuler_defconfig |   1 +
 arch/arm64/include/asm/exception.h     |   3 +
 arch/arm64/include/asm/thread_info.h   |   4 +
 arch/arm64/kernel/asm-offsets.c        |   3 +
 arch/arm64/kernel/cpufeature.c         |  23 +++
 arch/arm64/kernel/entry-common.c       |  25 ++++
 arch/arm64/kernel/entry.S              |  13 ++
 arch/arm64/kernel/syscall.c            |  33 ++++
 arch/arm64/kernel/xcall_entry.S        | 199 +++++++++++++++++++++++++
 arch/arm64/tools/cpucaps               |   1 +
 fs/proc/base.c                         | 110 ++++++++++++++
 include/linux/sched.h                  |   4 +
 kernel/fork.c                          |  17 +++
 14 files changed, 453 insertions(+)
 create mode 100644 arch/arm64/kernel/xcall_entry.S

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 58ee09b133ac..a0bdb34956b9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1671,6 +1671,23 @@ config ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG
 config ARCH_SUPPORTS_CRASH_DUMP
 	def_bool y
 
+config ARCH_SUPPORTS_XCALL
+	bool "xcall support"
+	depends on !ARM64_MTE && !KASAN_HW_TAGS
+	default n
+	help
+	  This enables support for the xcall feature.
+	  The svc exception handling process in ARM64, which includes
+	  auxiliary functions for debug/trace and core functions like
+	  KPTI, has been identified as overly "lengthy".
+	  This inefficiency is particularly noticeable in short syscalls
+	  such as lseek() and getpid(), where the syscall function itself
+	  comprises a small percentage of the total instructions executed.
+	  To address this, we introduce the concept of xcall, a fast svc
+	  exception handling path that only considers necessary features
+	  such as security, context saving, and recovery.
+	  Only aarch64 user mode programs support xcall processing.
+
 config TRANS_TABLE
 	def_bool y
 	depends on HIBERNATION || KEXEC_CORE
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index ddbc29bf75f8..2dff66043bae 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -490,6 +490,7 @@ CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y
 CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y
 CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y
 CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y
+# CONFIG_ARCH_SUPPORTS_XCALL is not set
 CONFIG_TRANS_TABLE=y
 # CONFIG_XEN is not set
 CONFIG_ARCH_FORCE_MAX_ORDER=10
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index ad688e157c9b..b4aa73d826c6 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -69,6 +69,9 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
 void do_el0_cp15(unsigned long esr, struct pt_regs *regs);
 int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs);
 void do_el0_svc(struct pt_regs *regs);
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+void do_el0_xcall(struct pt_regs *regs, int scno, int sc_nr);
+#endif
 void do_el0_svc_compat(struct pt_regs *regs);
 void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
 void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 2dd890c8e4f8..d06518b3d776 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -115,6 +115,10 @@ void arch_setup_new_exec(void);
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
 				 _TIF_SYSCALL_EMU)
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+#define _TIF_XCALL_WORK		_TIF_SYSCALL_AUDIT
+#endif
+
 #ifdef CONFIG_SHADOW_CALL_STACK
 #define INIT_SCS							\
 	.scs_base	= init_shadow_call_stack,			\
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index e997ad275afb..923324d0d7b4 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -28,6 +28,9 @@
 
 int main(void)
 {
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+  DEFINE(TSK_XCALL,		offsetof(struct task_struct, xcall_enable));
+#endif
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
   BLANK();
   DEFINE(TSK_TI_CPU,		offsetof(struct task_struct, thread_info.cpu));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a7b4ccd7983e..e99cc2d0ec30 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2375,6 +2375,21 @@ static void mpam_extra_caps(void)
 	__enable_mpam_hcr();
 }
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+static bool is_xcall_support;
+static int __init xcall_setup(char *str)
+{
+	is_xcall_support = true;
+	return 1;
+}
+__setup("xcall", xcall_setup);
+
+static bool has_xcall_support(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+	return is_xcall_support;
+}
+#endif
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.capability = ARM64_ALWAYS_BOOT,
@@ -2891,6 +2906,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, TWED, IMP)
 	},
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	{
+		.desc = "Xcall Support",
+		.capability = ARM64_HAS_XCALL,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_xcall_support,
+	},
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 08274e4317b2..1772e5e16454 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -696,6 +696,31 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
 	exit_to_user_mode(regs);
 }
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+asmlinkage void noinstr el0t_64_xcall_handler(struct pt_regs *regs)
+{
+	unsigned long flags;
+	unsigned long esr = read_sysreg(esr_el1);
+
+	switch (ESR_ELx_EC(esr)) {
+	/* Only support SVC64 for now. */
+	case ESR_ELx_EC_SVC64:
+		fp_user_discard();
+		local_daif_restore(DAIF_PROCCTX);
+		do_el0_xcall(regs, regs->regs[8], __NR_syscalls);
+
+		local_daif_mask();
+
+		flags = read_thread_flags();
+		if (unlikely(flags & _TIF_WORK_MASK))
+			do_notify_resume(regs, flags);
+		break;
+	default:
+		el0_inv(regs, esr);
+	}
+}
+#endif
+
 static void noinstr el0_svc(struct pt_regs *regs)
 {
 	enter_from_user_mode(regs);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 7fcbee0f6c0e..53fffbee2b88 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -569,9 +569,22 @@ SYM_CODE_START_LOCAL(__bad_stack)
 SYM_CODE_END(__bad_stack)
 #endif /* CONFIG_VMAP_STACK */
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+#include "xcall_entry.S"
+#endif
 
 	.macro entry_handler el:req, ht:req, regsize:req, label:req
 SYM_CODE_START_LOCAL(el\el\ht()_\regsize()_\label)
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	.if \el == 0 && \regsize == 64 && \label == sync
+	/* Only support el0 aarch64 sync exception */
+	alternative_if_not ARM64_HAS_XCALL
+	b	.Lxcall_bypass@
+	alternative_else_nop_endif
+	check_xcall_pre_kernel_entry
+	.Lxcall_bypass@:
+	.endif
+#endif
 	kernel_entry \el, \regsize
 	mov	x0, sp
 	bl	el\el\ht()_\regsize()_\label()_handler
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 558e9c9da8a4..d07aea72d850 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -14,6 +14,7 @@
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
+#include <linux/audit.h>
 
 long a32_arm_syscall(struct pt_regs *regs, int scno);
 long sys_ni_syscall(void);
@@ -162,6 +163,38 @@ static inline void delouse_pt_regs(struct pt_regs *regs)
 }
 #endif
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+static inline bool has_xcall_work(unsigned long flags)
+{
+	return unlikely(flags & _TIF_XCALL_WORK);
+}
+
+void do_el0_xcall(struct pt_regs *regs, int scno, int sc_nr)
+{
+	unsigned long flags = read_thread_flags();
+	const syscall_fn_t *syscall_table = sys_call_table;
+
+	regs->orig_x0 = regs->regs[0];
+	regs->syscallno = scno;
+
+	if (has_xcall_work(flags))
+		audit_syscall_entry(regs->syscallno, regs->orig_x0,
+				    regs->regs[1], regs->regs[2], regs->regs[3]);
+
+	if (likely(scno < sc_nr)) {
+		syscall_fn_t syscall_fn;
+		int xcall_nr = array_index_nospec(scno, sc_nr);
+
+		syscall_fn = syscall_table[xcall_nr];
+		regs->regs[0] = __invoke_syscall(regs, syscall_fn);
+	} else
+		regs->regs[0] = do_ni_syscall(regs, scno);
+
+	if (has_xcall_work(flags))
+		audit_syscall_exit(regs);
+}
+#endif
+
 void do_el0_svc(struct pt_regs *regs)
 {
 	const syscall_fn_t *t = sys_call_table;
diff --git a/arch/arm64/kernel/xcall_entry.S b/arch/arm64/kernel/xcall_entry.S
new file mode 100644
index 000000000000..d0da98d187c2
--- /dev/null
+++ b/arch/arm64/kernel/xcall_entry.S
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Low-level xcall handling code, based on arch/arm64/kernel/entry.S */
+
+	.macro	xcall_entry
+
+	alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT
+
+	stp	x0, x1, [sp, #16 * 0]		// save x0~x29
+	stp	x2, x3, [sp, #16 * 1]
+	stp	x4, x5, [sp, #16 * 2]
+	stp	x6, x7, [sp, #16 * 3]
+	stp	x8, x9, [sp, #16 * 4]
+	stp	x10, x11, [sp, #16 * 5]
+	stp	x12, x13, [sp, #16 * 6]
+	stp	x14, x15, [sp, #16 * 7]
+	stp	x16, x17, [sp, #16 * 8]
+	stp	x18, x19, [sp, #16 * 9]
+	stp	x20, x21, [sp, #16 * 10]
+	stp	x22, x23, [sp, #16 * 11]
+	stp	x24, x25, [sp, #16 * 12]
+	stp	x26, x27, [sp, #16 * 13]
+	stp	x28, x29, [sp, #16 * 14]
+
+	clear_gp_regs				// clear x0~x29
+	mrs	x21, sp_el0
+	ldr_this_cpu	tsk, __entry_task, x20
+	msr	sp_el0, tsk
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+	/*
+	 * Enable IA for in-kernel PAC if the task had it disabled. Although
+	 * this could be implemented with an unconditional MRS which would avoid
+	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
+	 *
+	 * Install the kernel IA key only if IA was enabled in the task. If IA
+	 * was disabled on kernel exit then we would have left the kernel IA
+	 * installed so there is no need to install it again.
+	 */
+	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
+	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
+	b	2f
+1:
+	mrs	x0, sctlr_el1
+	orr	x0, x0, SCTLR_ELx_ENIA
+	msr	sctlr_el1, x0
+2:
+alternative_else_nop_endif
+#endif
+
+	mrs	x22, elr_el1
+	mrs	x23, spsr_el1
+	stp	lr, x21, [sp, #S_LR]		// save LR, USER SP
+
+	stp	xzr, xzr, [sp, #S_STACKFRAME]
+	add	x29, sp, #S_STACKFRAME		// calc FP
+
+	stp	x22, x23, [sp, #S_PC]		// save ELR, SPSR
+
+	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
+	mov	w21, #NO_SYSCALL
+	str	w21, [sp, #S_SYSCALLNO]
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+	alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
+	b	.Lskip_pmr_save@
+	alternative_else_nop_endif
+
+	mrs_s	x20, SYS_ICC_PMR_EL1
+	str	x20, [sp, #S_PMR_SAVE]
+	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
+	msr_s	SYS_ICC_PMR_EL1, x20
+
+	.Lskip_pmr_save@:
+#endif
+	.endm
+
+SYM_CODE_START_LOCAL(ret_to_user_xcall)
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
+	b	.Lskip_pmr_restore
+alternative_else_nop_endif
+
+	ldr	x20, [sp, #S_PMR_SAVE]
+	msr_s	SYS_ICC_PMR_EL1, x20
+
+	/* Ensure priority change is seen by redistributor */
+alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC
+	dsb	sy
+alternative_else_nop_endif
+
+.Lskip_pmr_restore:
+#endif
+	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
+
+	ldr	x23, [sp, #S_SP]		// load return stack pointer
+	msr	sp_el0, x23
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+	/*
+	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
+	 * alternatively install the user's IA. All other per-task keys and
+	 * SCTLR bits were updated on task switch.
+	 *
+	 * No kernel C function calls after this.
+	 */
+	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
+	__ptrauth_keys_install_user tsk, x0, x1, x2
+	b	2f
+1:
+	mrs	x0, sctlr_el1
+	bic	x0, x0, SCTLR_ELx_ENIA
+	msr	sctlr_el1, x0
+2:
+alternative_else_nop_endif
+#endif
+
+	msr	elr_el1, x21			// set up the return data
+	msr	spsr_el1, x22
+	ldp	x0, x1, [sp, #16 * 0]
+	ldp	x2, x3, [sp, #16 * 1]
+	ldp	x4, x5, [sp, #16 * 2]
+	ldp	x6, x7, [sp, #16 * 3]
+	ldp	x8, x9, [sp, #16 * 4]
+	ldp	x10, x11, [sp, #16 * 5]
+	ldp	x12, x13, [sp, #16 * 6]
+	ldp	x14, x15, [sp, #16 * 7]
+	ldp	x16, x17, [sp, #16 * 8]
+	ldp	x18, x19, [sp, #16 * 9]
+	ldp	x20, x21, [sp, #16 * 10]
+	ldp	x22, x23, [sp, #16 * 11]
+	ldp	x24, x25, [sp, #16 * 12]
+	ldp	x26, x27, [sp, #16 * 13]
+	ldp	x28, x29, [sp, #16 * 14]
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	alternative_insn "b .L_skip_tramp_exit_xcall", nop, ARM64_UNMAP_KERNEL_AT_EL0
+
+	msr	far_el1, x29
+
+	ldr_this_cpu	x30, this_cpu_vector, x29
+	tramp_alias	x29, tramp_exit
+	msr	vbar_el1, x30			// install vector table
+	ldr	lr, [sp, #S_LR]			// restore x30
+	add	sp, sp, #PT_REGS_SIZE		// restore sp
+	br	x29
+
+.L_skip_tramp_exit_xcall:
+#endif
+
+	ldr	lr, [sp, #S_LR]
+	add	sp, sp, #PT_REGS_SIZE		// restore sp
+
+	/* This must be after the last explicit memory access */
+	alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+	tlbi	vale1, xzr
+	dsb	nsh
+	alternative_else_nop_endif
+	eret
+SYM_CODE_END(ret_to_user_xcall)
+
+SYM_CODE_START_LOCAL(xcall_handler)
+	xcall_entry
+	mov	x0, sp
+	bl	el0t_64_xcall_handler
+	b	ret_to_user_xcall
+SYM_CODE_END(xcall_handler)
+
+	.macro	check_xcall_pre_kernel_entry
+	stp	x20, x21, [sp, #0]
+	/* check ESR_ELx_EC_SVC64 */
+	mrs	x20, esr_el1
+	lsr	w20, w20, #ESR_ELx_EC_SHIFT
+	cmp	x20, #ESR_ELx_EC_SVC64
+	bne	.Lskip_xcall@
+	/* x8 >= __NR_syscalls */
+	cmp	x8, __NR_syscalls
+	bhs	.Lskip_xcall@
+	str	x8, [sp, #16]
+	/* x21 = task_struct->xcall_enable */
+	ldr_this_cpu	x20, __entry_task, x21
+	ldr	x21, [x20, #TSK_XCALL]
+	/* x20 = sc_no / 8 */
+	lsr	x20, x8, 3
+	ldr	x21, [x21, x20]
+	/* x8 = sc_no % 8 */
+	and	x8, x8, 7
+	mov	x20, 1
+	lsl	x20, x20, x8
+	and	x21, x21, x20
+	cmp	x21, 0
+	ldr	x8, [sp, #16]
+	beq	.Lskip_xcall@
+	ldp	x20, x21, [sp, #0]
+	b	xcall_handler
+.Lskip_xcall@:
+	ldp	x20, x21, [sp, #0]
+	.endm
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 2d3df8c73158..1f662b0bc1f7 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -53,6 +53,7 @@ HAS_TLB_RANGE
 HAS_TWED
 HAS_VIRT_HOST_EXTN
 HAS_WFXT
+HAS_XCALL
 HW_DBM
 KVM_HVHE
 KVM_PROTECTED_MODE
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 63fa766e5feb..9e41e0ec4248 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3394,6 +3394,113 @@ static const struct file_operations proc_pid_sg_level_operations = {
 };
 #endif
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+static int xcall_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+	unsigned int rs, re;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	if (!p->xcall_enable)
+		goto out;
+
+	seq_printf(m, "Enabled Total[%d/%d]:", bitmap_weight(p->xcall_enable, __NR_syscalls),
+		   __NR_syscalls);
+
+	for (rs = 0, bitmap_next_set_region(p->xcall_enable, &rs, &re, __NR_syscalls);
+	     rs < re; rs = re + 1,
+	     bitmap_next_set_region(p->xcall_enable, &rs, &re, __NR_syscalls)) {
+		rs == (re - 1) ? seq_printf(m, "%d,", rs) :
+				 seq_printf(m, "%d-%d,", rs, re - 1);
+	}
+	seq_printf(m, "\n");
+out:
+	put_task_struct(p);
+
+	return 0;
+}
+
+static int xcall_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, xcall_show, inode);
+}
+
+static int xcall_enable_one(struct task_struct *p, unsigned int sc_no)
+{
+	bitmap_set(p->xcall_enable, sc_no, 1);
+	return 0;
+}
+
+static int xcall_disable_one(struct task_struct *p, unsigned int sc_no)
+{
+	bitmap_clear(p->xcall_enable, sc_no, 1);
+	return 0;
+}
+
+static ssize_t xcall_write(struct file *file, const char __user *buf,
+			   size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[TASK_COMM_LEN];
+	const size_t maxlen = sizeof(buffer) - 1;
+	unsigned int sc_no = __NR_syscalls;
+	int ret = 0, is_clear = 0;
+
+	/* No support for ILP32 compat tasks */
+#ifdef CONFIG_ARM64_ILP32
+	if (is_ilp32_compat_task())
+		return -EACCES;
+#endif
+
+	memset(buffer, 0, sizeof(buffer));
+	if (!count || copy_from_user(buffer, buf, count > maxlen ? maxlen : count))
+		return -EFAULT;
+
+	p = get_proc_task(inode);
+	if (!p || !p->xcall_enable)
+		return -ESRCH;
+
+	if (buffer[0] == '!')
+		is_clear = 1;
+
+	if (kstrtouint(buffer + is_clear, 10, &sc_no)) {
+		put_task_struct(p);
+		return -EINVAL;
+	}
+
+	if (sc_no >= __NR_syscalls) {
+		put_task_struct(p);
+		return -EINVAL;
+	}
+
+	if (!is_clear && !test_bit(sc_no, p->xcall_enable))
+		ret = xcall_enable_one(p, sc_no);
+	else if (is_clear && test_bit(sc_no, p->xcall_enable))
+		ret = xcall_disable_one(p, sc_no);
+	else
+		ret = -EINVAL;
+
+	put_task_struct(p);
+
+	if (ret)
+		return ret;
+	return count;
+}
+
+static const struct file_operations proc_pid_xcall_operations = {
+	.open		= xcall_open,
+	.read		= seq_read,
+	.write		= xcall_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#endif
+
 /*
  * Thread groups
  */
@@ -3420,6 +3527,9 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_QOS_SCHED_SMART_GRID
 	REG("smart_grid_level", 0644, proc_pid_sg_level_operations),
 #endif
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	REG("xcall", 0644, proc_pid_xcall_operations),
+#endif
 #ifdef CONFIG_SCHED_AUTOGROUP
 	REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b8be76b0c120..66890688cf00 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1606,7 +1606,11 @@ struct task_struct {
 	 */
 	randomized_struct_fields_end
 
+#if defined(CONFIG_ARCH_SUPPORTS_XCALL)
+	KABI_USE(1, unsigned long *xcall_enable)
+#else
 	KABI_RESERVE(1)
+#endif
 	KABI_RESERVE(2)
 	KABI_RESERVE(3)
 	KABI_RESERVE(4)
diff --git a/kernel/fork.c b/kernel/fork.c
index a8a30a21799a..885d2c68a6ee 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -636,6 +636,10 @@ void free_task(struct task_struct *tsk)
 #ifdef CONFIG_QOS_SCHED_SMART_GRID
 	if (smart_grid_enabled())
 		sched_grid_qos_free(tsk);
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	if (tsk->xcall_enable)
+		bitmap_free(tsk->xcall_enable);
 #endif
 	free_task_struct(tsk);
 }
@@ -1255,6 +1259,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->mm_cid_active = 0;
 	tsk->migrate_from_cpu = -1;
 #endif
+
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	tsk->xcall_enable = NULL;
+#endif
 	return tsk;
 
 free_stack:
@@ -2417,6 +2425,15 @@ __latent_entropy struct task_struct *copy_process(
 
 	rt_mutex_init_task(p);
 
+#ifdef CONFIG_ARCH_SUPPORTS_XCALL
+	p->xcall_enable = bitmap_zalloc(__NR_syscalls, GFP_KERNEL);
+	if (!p->xcall_enable)
+		goto bad_fork_free;
+
+	if (current->xcall_enable)
+		bitmap_copy(p->xcall_enable, current->xcall_enable, __NR_syscalls);
+#endif
+
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
 	if (dynamic_affinity_enabled()) {
 		retval = sched_prefer_cpus_fork(p, current->prefer_cpus);