From: yangerkun yangerkun@huawei.com
hulk inclusion category: feature bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27 CVE: NA ---------------------------
Backporting io_uring will extend the syscall table, which would change the KABI (as bpf_trace_run1 did). Fix it by handling the new syscall numbers directly in do_syscall_64.
Signed-off-by: yangerkun yangerkun@huawei.com Reviewed-by: zhangyi (F) yi.zhang@huawei.com Reviewed-by: Chen Zhou chenzhou10@huawei.com Signed-off-by: Cheng Jian cj.chengjian@huawei.com --- arch/x86/entry/common.c | 7 +++++++ arch/x86/entry/syscalls/syscall_32.tbl | 3 --- arch/x86/entry/syscalls/syscall_64.tbl | 3 --- arch/x86/include/asm/syscall_wrapper.h | 3 +++ 4 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 8353348ddeaf..0723098a3961 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -291,6 +291,13 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs) if (likely(nr < NR_syscalls)) { nr = array_index_nospec(nr, NR_syscalls); regs->ax = sys_call_table[nr](regs); + } else { + if (nr == 425) + regs->ax = __x64_sys_io_uring_setup(regs); + else if (likely(nr == 426)) + regs->ax = __x64_sys_io_uring_enter(regs); + else if (nr == 427) + regs->ax = __x64_sys_io_uring_register(regs); }
syscall_return_slowpath(regs); diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 2eefd2a7c1ce..3cf7b533b3d1 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -398,6 +398,3 @@ 384 i386 arch_prctl sys_arch_prctl __ia32_compat_sys_arch_prctl 385 i386 io_pgetevents sys_io_pgetevents __ia32_compat_sys_io_pgetevents 386 i386 rseq sys_rseq __ia32_sys_rseq -425 i386 io_uring_setup sys_io_uring_setup __ia32_sys_io_uring_setup -426 i386 io_uring_enter sys_io_uring_enter __ia32_sys_io_uring_enter -427 i386 io_uring_register sys_io_uring_register __ia32_sys_io_uring_register diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 65c026185e61..f0b1709a5ffb 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -343,9 +343,6 @@ 332 common statx __x64_sys_statx 333 common io_pgetevents __x64_sys_io_pgetevents 334 common rseq __x64_sys_rseq -425 common io_uring_setup __x64_sys_io_uring_setup -426 common io_uring_enter __x64_sys_io_uring_enter -427 common io_uring_register __x64_sys_io_uring_register
# # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h index 90eb70df0b18..46e125b2d08a 100644 --- a/arch/x86/include/asm/syscall_wrapper.h +++ b/arch/x86/include/asm/syscall_wrapper.h @@ -206,5 +206,8 @@ struct pt_regs; asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs); asmlinkage long __x64_sys_gettimeofday(const struct pt_regs *regs); asmlinkage long __x64_sys_time(const struct pt_regs *regs); +asmlinkage long __x64_sys_io_uring_setup(const struct pt_regs *regs); +asmlinkage long __x64_sys_io_uring_enter(const struct pt_regs *regs); +asmlinkage long __x64_sys_io_uring_register(const struct pt_regs *regs);
#endif /* _ASM_X86_SYSCALL_WRAPPER_H */