hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8MGE6
CVE: NA
--------------------------------
Support livepatch without ftrace for ARM64.

Supported now:
  - livepatch relocation when init_patch after load_module;
  - instruction patching when the patch is enabled;
  - activeness function check;
  - enforcing the patch stacking principle;
  - long jump (for both livepatch relocation and instruction patching), illustrated by the sketch below;
  - module PLTs requested by livepatch relocation.
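When the new function lies outside the ±128M range of an immediate branch, do_patch() in this patch writes a 4-instruction trampoline through x16 (movn/movk/movk/br); since movn leaves bits [63:48] all-ones, the sequence targets kernel virtual addresses whose top 16 bits are set. The following stand-alone user-space sketch only illustrates that encoding arithmetic and is not part of the kernel code; the sample target address and the build_long_jump()/main() driver are made up for the example:

  #include <stdint.h>
  #include <stdio.h>

  /* Mirror of the out-of-range branch encoding used by do_patch(). */
  static void build_long_jump(uint64_t addr, uint32_t insns[4])
  {
          /* movn x16, #(~addr & 0xffff): x16 = addr[15:0], bits [63:16] set */
          insns[0] = 0x92800010u | (uint32_t)((~addr & 0xffff) << 5);
          /* movk x16, #addr[31:16], lsl #16 */
          insns[1] = 0xf2a00010u | (uint32_t)(((addr >> 16) & 0xffff) << 5);
          /* movk x16, #addr[47:32], lsl #32 */
          insns[2] = 0xf2c00010u | (uint32_t)(((addr >> 32) & 0xffff) << 5);
          /* br x16 */
          insns[3] = 0xd61f0200u;
  }

  int main(void)
  {
          uint32_t insns[4];
          int i;

          /* hypothetical kernel text address, for illustration only */
          build_long_jump(0xffff800010123456ULL, insns);
          for (i = 0; i < 4; i++)
                  printf("insn[%d] = 0x%08x\n", i, insns[i]);
          return 0;
  }

Compiled natively, this prints the same four words that do_patch() would place at the entry of the patched function for that (made-up) target address.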
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
Signed-off-by: Dong Kai <dongkai11@huawei.com>
Signed-off-by: Ye Weihua <yeweihua4@huawei.com>
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm64/Kconfig                 |   3 +
 arch/arm64/include/asm/livepatch.h |  48 ++++++
 arch/arm64/kernel/Makefile         |   1 +
 arch/arm64/kernel/livepatch.c      | 225 +++++++++++++++++++++++++++++
 4 files changed, 277 insertions(+)
 create mode 100644 arch/arm64/include/asm/livepatch.h
 create mode 100644 arch/arm64/kernel/livepatch.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6062a52a084f..d5ed726a9c5b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -251,6 +251,7 @@ config ARM64
 	select SWIOTLB
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
+	select HAVE_LIVEPATCH_WO_FTRACE
 	select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
 	select TRACE_IRQFLAGS_SUPPORT
 	select TRACE_IRQFLAGS_NMI_SUPPORT
@@ -411,6 +412,8 @@ config UNWIND_TABLES

 source "arch/arm64/Kconfig.platforms"

+source "kernel/livepatch/Kconfig"
+
 menu "Kernel Features"
menu "ARM errata workarounds via the alternatives framework" diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h new file mode 100644 index 000000000000..0fbfaad1f31f --- /dev/null +++ b/arch/arm64/include/asm/livepatch.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2014-2019, Huawei. + * Author: Li Bin huawei.libin@huawei.com + * Author: Cheng Jian cj.chengjian@huawei.com + * Copyright (C) 2023 Huawei. + * Author: Zheng Yejian zhengyejian1@huawei.com + * + * livepatch.h - arm64-specific Kernel Live Patching Core + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see http://www.gnu.org/licenses/. + */ + +#ifndef _ASM_ARM64_LIVEPATCH_H +#define _ASM_ARM64_LIVEPATCH_H + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + +#define LJMP_INSN_SIZE 4 + +struct arch_klp_data { + u32 old_insns[LJMP_INSN_SIZE]; +}; + +#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns) + +struct klp_func; + +#define klp_smp_isb() isb() +int arch_klp_patch_func(struct klp_func *func); +void arch_klp_unpatch_func(struct klp_func *func); +long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +bool arch_check_jump_insn(unsigned long func_addr); +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data); +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ + +#endif /* _ASM_ARM64_LIVEPATCH_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index d95b3d6b471a..f5225dd3e7ab 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_CPU_PM) += sleep.o suspend.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o +obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c new file mode 100644 index 000000000000..c2f8da8bfec9 --- /dev/null +++ b/arch/arm64/kernel/livepatch.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * livepatch.c - arm64-specific Kernel Live Patching Core + * + * Copyright (C) 2014 Li Bin huawei.libin@huawei.com + * Copyright (C) 2023 Zheng Yejian zhengyejian1@huawei.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/livepatch.h>
+#include <asm/livepatch.h>
+#include <asm/stacktrace.h>
+#include <linux/slab.h>
+#include <asm/insn.h>
+#include <asm-generic/sections.h>
+#include <asm/patching.h>
+#include <asm/debug-monitors.h>
+#include <linux/sched/debug.h>
+#include <linux/kallsyms.h>
+
+#define CHECK_JUMP_RANGE LJMP_INSN_SIZE
+
+static inline bool offset_in_range(unsigned long pc, unsigned long addr,
+				   long range)
+{
+	long offset = addr - pc;
+
+	return (offset >= -range && offset < range);
+}
+
+/*
+ * The instruction set on arm64 is A64.
+ * The instruction of BLR is 1101011000111111000000xxxxx00000.
+ * The instruction of BL is 100101xxxxxxxxxxxxxxxxxxxxxxxxxx.
+ * The instruction of BLRAX is 1101011x0011111100001xxxxxxxxxxx.
+ */
+#define is_jump_insn(insn) (((le32_to_cpu(insn) & 0xfffffc1f) == 0xd63f0000) || \
+			    ((le32_to_cpu(insn) & 0xfc000000) == 0x94000000) || \
+			    ((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800))
+
+bool arch_check_jump_insn(unsigned long func_addr)
+{
+	unsigned long i;
+	u32 *insn = (u32 *)func_addr;
+
+	for (i = 0; i < CHECK_JUMP_RANGE; i++) {
+		if (is_jump_insn(*insn))
+			return true;
+		insn++;
+	}
+	return false;
+}
+
+static bool klp_check_jump_func(void *ws_args, unsigned long pc)
+{
+	struct walk_stackframe_args *args = ws_args;
+
+	return args->check_func(args->data, &args->ret, pc);
+}
+
+static int check_task_calltrace(struct task_struct *t,
+				struct walk_stackframe_args *args,
+				bool (*fn)(void *, unsigned long))
+{
+	arch_stack_walk(fn, args, t, NULL);
+	if (args->ret) {
+		pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
+		show_stack(t, NULL, KERN_INFO);
+		return args->ret;
+	}
+	return 0;
+}
+
+static int do_check_calltrace(struct walk_stackframe_args *args,
+			      bool (*fn)(void *, unsigned long))
+{
+	int ret;
+	struct task_struct *g, *t;
+	unsigned int cpu;
+
+	for_each_process_thread(g, t) {
+		if (klp_is_migration_thread(t->comm))
+			continue;
+		ret = check_task_calltrace(t, args, fn);
+		if (ret)
+			return ret;
+	}
+	for_each_online_cpu(cpu) {
+		ret = check_task_calltrace(idle_task(cpu), args, fn);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
+{
+	struct walk_stackframe_args args = {
+		.data = data,
+		.ret = 0,
+		.check_func = check_func,
+	};
+
+	return do_check_calltrace(&args, klp_check_jump_func);
+}
+
+long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func)
+{
+	long ret;
+	int i;
+
+	for (i = 0; i < LJMP_INSN_SIZE; i++) {
+		ret = aarch64_insn_read(((u32 *)old_func) + i,
+					&arch_data->old_insns[i]);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+static int klp_patch_text(u32 *dst, const u32 *src, int len)
+{
+	int i;
+	int ret;
+
+	if (len <= 0)
+		return -EINVAL;
+	/* skip breakpoint at first */
+	for (i = 1; i < len; i++) {
+		ret = aarch64_insn_patch_text_nosync(dst + i, src[i]);
+		if (ret)
+			return ret;
+	}
+	/*
+	 * Avoid compile optimization, make sure that instructions
+	 * except first breakpoint has been patched.
+	 */
+	barrier();
+	return aarch64_insn_patch_text_nosync(dst, src[0]);
+}
+
+static int do_patch(unsigned long pc, unsigned long new_addr)
+{
+	u32 insns[LJMP_INSN_SIZE];
+	int ret;
+
+	if (offset_in_range(pc, new_addr, SZ_128M)) {
+		insns[0] = aarch64_insn_gen_branch_imm(pc, new_addr,
+						       AARCH64_INSN_BRANCH_NOLINK);
+		ret = klp_patch_text((u32 *)pc, insns, 1);
+		if (ret) {
+			pr_err("patch instruction small range failed, ret=%d\n", ret);
+			return -EPERM;
+		}
+	} else {
+		/* movn x16, #0x.... */
+		/* movk x16, #0x...., lsl #16 */
+		/* movk x16, #0x...., lsl #32 */
+		/* br x16 */
+		insns[0] = 0x92800010 | (((~new_addr) & 0xffff)) << 5;
+		insns[1] = 0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5;
+		insns[2] = 0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5;
+		insns[3] = 0xd61f0200;
+		ret = klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE);
+		if (ret) {
+			pr_err("patch instruction large range failed, ret=%d\n", ret);
+			return -EPERM;
+		}
+	}
+	return 0;
+}
+
+int arch_klp_patch_func(struct klp_func *func)
+{
+	struct klp_func_node *func_node;
+	int ret;
+
+	func_node = func->func_node;
+	list_add_rcu(&func->stack_node, &func_node->func_stack);
+	ret = do_patch((unsigned long)func->old_func, (unsigned long)func->new_func);
+	if (ret)
+		list_del_rcu(&func->stack_node);
+	return ret;
+}
+
+void arch_klp_unpatch_func(struct klp_func *func)
+{
+	struct klp_func_node *func_node;
+	struct klp_func *next_func;
+	unsigned long pc;
+	int ret;
+
+	func_node = func->func_node;
+	pc = (unsigned long)func_node->old_func;
+	list_del_rcu(&func->stack_node);
+	if (list_empty(&func_node->func_stack)) {
+		ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE);
+		if (ret) {
+			pr_err("restore instruction failed, ret=%d\n", ret);
+			return;
+		}
+	} else {
+		next_func = list_first_or_null_rcu(&func_node->func_stack,
+						   struct klp_func, stack_node);
+		if (WARN_ON(!next_func))
+			return;
+		do_patch(pc, (unsigned long)next_func->new_func);
+	}
+}
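For reference, and not part of the diff above: the activeness check leans on arch_check_jump_insn() scanning the first LJMP_INSN_SIZE words of a patched function for call-type instructions (BL/BLR/BLRA*), presumably because a call in that range could leave a return address pointing into the very words that get overwritten. A minimal user-space rendering of the is_jump_insn() masks is sketched below; it drops the le32_to_cpu() byte-order handling of the kernel macro, and the sample encodings are hand-picked illustrations:

  #include <stdint.h>
  #include <stdio.h>

  /* Host-side copy of the is_jump_insn() masks from livepatch.c. */
  static int is_jump_insn(uint32_t insn)
  {
          return ((insn & 0xfffffc1f) == 0xd63f0000) ||   /* blr xN       */
                 ((insn & 0xfc000000) == 0x94000000) ||   /* bl <imm26>   */
                 ((insn & 0xfefff800) == 0xd63f0800);     /* blraa/blrab  */
  }

  int main(void)
  {
          const uint32_t samples[] = {
                  0x94000001,     /* bl .+4  -> jump      */
                  0xd63f0020,     /* blr x1  -> jump      */
                  0xd503201f,     /* nop     -> not jump  */
          };
          unsigned int i;

          for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                  printf("0x%08x -> %s\n", samples[i],
                         is_jump_insn(samples[i]) ? "jump" : "not a jump");
          return 0;
  }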