hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7W46D
--------------------------------
support livepatch without ftrace for x86_64
Supported so far: livepatch relocation when init_patch runs after load_module; instruction patching when the patch is enabled; the activeness function check; and enforcement of the patch stacking principle.
x86_64 uses variable-length instructions, so no extra implementation is needed for long jumps.
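For illustration only (a minimal sketch, not code from this patch; the patch itself builds the jump with text_gen_insn(JMP32_INSN_OPCODE, ...) and installs it with text_poke()), the 5-byte "jmp rel32" that overwrites the old function prologue could be encoded like this:

/* Illustration only: encode "jmp rel32" (opcode 0xE9) from ip to target. */
#include <stdint.h>
#include <string.h>

#define JMP_E9_INSN_SIZE 5

static void encode_jmp_e9(uint8_t insn[JMP_E9_INSN_SIZE],
			  unsigned long ip, unsigned long target)
{
	/* rel32 is relative to the address right after the 5-byte JMP */
	int32_t rel32 = (int32_t)(target - (ip + JMP_E9_INSN_SIZE));

	insn[0] = 0xE9;					/* JMP rel32 opcode */
	memcpy(&insn[1], &rel32, sizeof(rel32));	/* x86 is little-endian */
}

Since rel32 reaches +/-2 GiB and kernel text plus module space fit within that range on x86_64, a single 5-byte JMP is always sufficient.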
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Li Bin <huawei.libin@huawei.com>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
Signed-off-by: Ye Weihua <yeweihua4@huawei.com>
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/x86/configs/openeuler_defconfig |   4 +-
 arch/x86/include/asm/livepatch.h     |  33 +
 arch/x86/kernel/Makefile             |   1 +
 arch/x86/kernel/livepatch.c          | 289 ++++
 include/linux/livepatch.h            |  87 ++-
 kernel/livepatch/Kconfig             |  21 +-
 kernel/livepatch/core.c              | 989 ++++++++++++++++++++++++++-
 7 files changed, 1388 insertions(+), 36 deletions(-)
 create mode 100644 arch/x86/include/asm/livepatch.h
 create mode 100644 arch/x86/kernel/livepatch.c
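Usage note (not part of the commit): a patch module built against the reintroduced klp_register_patch()/klp_unregister_patch() interface would look roughly like the sketch below, modeled on the classic livepatch sample; the target cmdline_proc_show() and its replacement are hypothetical. After insmod, the patch is enabled through /sys/kernel/livepatch/<module>/enabled, and its state can be read from /proc/livepatch/state.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

/* Hypothetical replacement for cmdline_proc_show() in vmlinux. */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* NULL name means the object is vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int __init patch_init(void)
{
	return klp_register_patch(&patch);
}

static void __exit patch_exit(void)
{
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(patch_init);
module_exit(patch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");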
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index 25af826a4609..d702023ff3c5 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -484,9 +484,9 @@ CONFIG_LEGACY_VSYSCALL_XONLY=y # CONFIG_CMDLINE_BOOL is not set CONFIG_MODIFY_LDT_SYSCALL=y # CONFIG_STRICT_SIGALTSTACK_SIZE is not set -CONFIG_HAVE_LIVEPATCH_FTRACE=y +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y CONFIG_LIVEPATCH=y -CONFIG_LIVEPATCH_FTRACE=y +CONFIG_LIVEPATCH_WO_FTRACE=y # end of Processor type and features
CONFIG_FUNCTION_PADDING_CFI=11 diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h new file mode 100644 index 000000000000..359c06ad1cc2 --- /dev/null +++ b/arch/x86/include/asm/livepatch.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * livepatch.h - x86-specific Kernel Live Patching Core + * + * Copyright (C) 2023 Huawei. + */ + +#ifndef _ASM_X86_LIVEPATCH_H +#define _ASM_X86_LIVEPATCH_H + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + +#define JMP_E9_INSN_SIZE 5 +struct arch_klp_data { + unsigned char old_insns[JMP_E9_INSN_SIZE]; +}; + +#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns) + +struct klp_func; + +#define klp_smp_isb() +int arch_klp_patch_func(struct klp_func *func); +void arch_klp_unpatch_func(struct klp_func *func); +long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +bool arch_check_jump_insn(unsigned long func_addr); +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data); +void arch_klp_code_modify_prepare(void); +void arch_klp_code_modify_post_process(void); + +#endif /* CONFIG_LIVEPATCH_WO_FTRACE */ + +#endif /* _ASM_X86_LIVEPATCH_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 4070a01c11b7..7a114f41fe7a 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -90,6 +90,7 @@ obj-$(CONFIG_SMP) += setup_percpu.o obj-$(CONFIG_X86_MPPARSE) += mpparse.o obj-y += apic/ obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o +obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c new file mode 100644 index 000000000000..a877ffdf96ac --- /dev/null +++ b/arch/x86/kernel/livepatch.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * livepatch.c - x86-specific Kernel Live Patching Core + * + * Copyright (C) 2023 Huawei Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see http://www.gnu.org/licenses/. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <linux/livepatch.h> +#include <linux/stacktrace.h> +#include <linux/memory.h> +#include <asm/text-patching.h> +#include <asm/stacktrace.h> +#include <asm/set_memory.h> +#include <asm/insn.h> + +#include <linux/slab.h> +#include <asm/nops.h> +#include <asm/sections.h> +#include <linux/kprobes.h> + +/* + * The instruction set on x86 is CISC. + * The instructions of call in same segment are 11101000(direct), + * 11111111(register indirect) and 11111111(memory indirect). + * The instructions of call in other segment are 10011010(direct), + * 11111111(indirect). 
+ */ +static bool is_jump_insn(u8 *insn) +{ + if ((insn[0] == 0xE8) || (insn[0] == 0x9a)) + return true; + else if ((insn[0] == 0xFF) && ((insn[1] & 0x30) == 0x10)) + return true; + return false; +} + +bool arch_check_jump_insn(unsigned long func_addr) +{ + int len = JMP_E9_INSN_SIZE; + struct insn insn; + u8 *addr = (u8 *)func_addr; + + do { + if (is_jump_insn(addr)) + return true; + insn_init(&insn, addr, MAX_INSN_SIZE, 1); + insn_get_length(&insn); + if (!insn.length || !insn_complete(&insn)) + return true; + len -= insn.length; + addr += insn.length; + } while (len > 0); + + return false; +} + +static void klp_print_stack_trace(void *trace_ptr, int trace_len) +{ + int i; +#ifdef CONFIG_ARCH_STACKWALK + unsigned long *trace = trace_ptr; +#else + struct stack_trace *trace = trace_ptr; +#endif + + pr_err("Call Trace:\n"); +#ifdef CONFIG_ARCH_STACKWALK + for (i = 0; i < trace_len; i++) { + pr_err("[<%pK>] %pS\n", + (void *)trace[i], + (void *)trace[i]); + } +#else + for (i = 0; i < trace->nr_entries; i++) { + pr_err("[<%pK>] %pS\n", + (void *)trace->entries[i], + (void *)trace->entries[i]); + } +#endif + +} + +#ifdef MAX_STACK_ENTRIES +#undef MAX_STACK_ENTRIES +#endif +#define MAX_STACK_ENTRIES 100 + +static int klp_check_stack(void *trace_ptr, int trace_len, + bool (*fn)(void *, int *, unsigned long), void *data) +{ +#ifdef CONFIG_ARCH_STACKWALK + unsigned long *trace = trace_ptr; +#else + struct stack_trace *trace = trace_ptr; +#endif + unsigned long address; + int i, ret; + +#ifdef CONFIG_ARCH_STACKWALK + for (i = 0; i < trace_len; i++) { + address = trace[i]; +#else + for (i = 0; i < trace->nr_entries; i++) { + address = trace->entries[i]; +#endif + if (!fn(data, &ret, address)) { +#ifdef CONFIG_ARCH_STACKWALK + klp_print_stack_trace(trace_ptr, trace_len); +#else + klp_print_stack_trace(trace_ptr, 0); +#endif + return ret; + } + } + + return 0; +} + +static int check_task_calltrace(struct task_struct *t, + bool (*fn)(void *, int *, unsigned long), + void *data) +{ + int ret = 0; + static unsigned long trace_entries[MAX_STACK_ENTRIES]; +#ifdef CONFIG_ARCH_STACKWALK + int trace_len; +#else + struct stack_trace trace; +#endif + +#ifdef CONFIG_ARCH_STACKWALK + ret = stack_trace_save_tsk_reliable(t, trace_entries, MAX_STACK_ENTRIES); + if (ret < 0) { + pr_err("%s:%d has an unreliable stack, ret=%d\n", + t->comm, t->pid, ret); + return ret; + } + trace_len = ret; + ret = klp_check_stack(trace_entries, trace_len, fn, data); +#else + trace.skip = 0; + trace.nr_entries = 0; + trace.max_entries = MAX_STACK_ENTRIES; + trace.entries = trace_entries; + ret = save_stack_trace_tsk_reliable(t, &trace); + if (ret) { + pr_err("%s: %s:%d has an unreliable stack, ret=%d\n", + __func__, t->comm, t->pid, ret); + return ret; + } + ret = klp_check_stack(&trace, 0, fn, data); +#endif + if (ret) { + pr_err("%s:%d check stack failed, ret=%d\n", + t->comm, t->pid, ret); + return ret; + } + return 0; +} + +static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) +{ + int ret = 0; + struct task_struct *g, *t; + unsigned int cpu; + + for_each_process_thread(g, t) { + if (klp_is_migration_thread(t->comm)) + continue; + + ret = check_task_calltrace(t, fn, data); + if (ret) + return ret; + } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), fn, data); + if (ret) + return ret; + } + return 0; +} + +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) +{ + return do_check_calltrace(check_func, data); +} + +void 
arch_klp_code_modify_prepare(void) + __acquires(&text_mutex) +{ + mutex_lock(&text_mutex); +} + +void arch_klp_code_modify_post_process(void) + __releases(&text_mutex) +{ + text_poke_sync(); + mutex_unlock(&text_mutex); +} + +long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) +{ + long ret; + + /* Prevent text modification */ + mutex_lock(&text_mutex); + ret = copy_from_kernel_nofault(arch_data->old_insns, + old_func, JMP_E9_INSN_SIZE); + mutex_unlock(&text_mutex); + + return ret; +} + +static void klp_patch_text(void *dst, const void *src, int len) +{ + if (len <= 1) + return; + /* skip breakpoint at first */ + text_poke(dst + 1, src + 1, len - 1); + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. + */ + barrier(); + /* update jmp opcode */ + text_poke(dst, src, 1); +} + +static void *klp_jmp_code(unsigned long ip, unsigned long addr) +{ + return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr); +} + +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + unsigned long ip, new_addr; + unsigned char *new; + + func_node = func->func_node; + ip = (unsigned long)func->old_func; + list_add_rcu(&func->stack_node, &func_node->func_stack); + new_addr = (unsigned long)func->new_func; + /* replace the text with the new text */ + new = (unsigned char *)klp_jmp_code(ip, new_addr); + klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE); + return 0; +} + +void arch_klp_unpatch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + struct klp_func *next_func; + unsigned long ip, new_addr; + void *new; + + func_node = func->func_node; + ip = (unsigned long)func_node->old_func; + list_del_rcu(&func->stack_node); + if (list_empty(&func_node->func_stack)) { + new = func_node->arch_data.old_insns; + } else { + next_func = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + + new_addr = (unsigned long)next_func->new_func; + new = klp_jmp_code(ip, new_addr); + } + + /* replace the text with the new text */ + klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE); +} diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index f635bd6b2c6f..5610b56ff74e 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -17,11 +17,17 @@
#if IS_ENABLED(CONFIG_LIVEPATCH)
+#include <asm/livepatch.h> + /* task patch states */ #define KLP_UNDEFINED -1 #define KLP_UNPATCHED 0 #define KLP_PATCHED 1
+#define KLP_NORMAL_FORCE 0 +#define KLP_ENFORCEMENT 1 +#define KLP_STACK_OPTIMIZE 2 + /** * struct klp_func - function structure for live patching * @old_name: name of the function to be patched @@ -65,6 +71,7 @@ struct klp_func { * in kallsyms for the given object is used. */ unsigned long old_sympos; + int force; /* Only used in the solution without ftrace */
/* internal */ void *old_func; @@ -72,16 +79,14 @@ struct klp_func { struct list_head node; struct list_head stack_node; unsigned long old_size, new_size; -#ifdef CONFIG_LIVEPATCH_FTRACE - bool nop; -#endif + bool nop; /* Not used in the solution without ftrace */ bool patched; #ifdef CONFIG_LIVEPATCH_FTRACE bool transition; #endif + void *func_node; /* Only used in the solution without ftrace */ };
-#ifdef CONFIG_LIVEPATCH_FTRACE struct klp_object;
/** @@ -105,7 +110,6 @@ struct klp_callbacks { void (*post_unpatch)(struct klp_object *obj); bool post_unpatch_enabled; }; -#endif /* CONFIG_LIVEPATCH_FTRACE */
/** * struct klp_object - kernel object structure for live patching @@ -124,22 +128,17 @@ struct klp_object { /* external */ const char *name; struct klp_func *funcs; -#ifdef CONFIG_LIVEPATCH_FTRACE - struct klp_callbacks callbacks; -#endif + struct klp_callbacks callbacks; /* Not used in the solution without ftrace */
/* internal */ struct kobject kobj; struct list_head func_list; struct list_head node; struct module *mod; -#ifdef CONFIG_LIVEPATCH_FTRACE - bool dynamic; -#endif + bool dynamic; /* Not used in the solution without ftrace */ bool patched; };
-#ifdef CONFIG_LIVEPATCH_FTRACE /** * struct klp_state - state of the system modified by the livepatch * @id: system state identifier (non-zero) @@ -151,7 +150,6 @@ struct klp_state { unsigned int version; void *data; }; -#endif /* CONFIG_LIVEPATCH_FTRACE */
/** * struct klp_patch - patch structure for live patching @@ -171,19 +169,15 @@ struct klp_patch { /* external */ struct module *mod; struct klp_object *objs; -#ifdef CONFIG_LIVEPATCH_FTRACE - struct klp_state *states; - bool replace; -#endif + struct klp_state *states; /* Not used in the solution without ftrace */ + bool replace; /* Not used in the solution without ftrace */
/* internal */ struct list_head list; struct kobject kobj; struct list_head obj_list; bool enabled; -#ifdef CONFIG_LIVEPATCH_FTRACE - bool forced; -#endif + bool forced; /* Not used in the solution without ftrace */ struct work_struct free_work; struct completion finish; }; @@ -208,9 +202,9 @@ struct klp_patch { #define klp_for_each_func(obj, func) \ list_for_each_entry(func, &obj->func_list, node)
+#ifdef CONFIG_LIVEPATCH_FTRACE int klp_enable_patch(struct klp_patch *);
-#ifdef CONFIG_LIVEPATCH_FTRACE /* Called from the module loader during module coming/going states */ int klp_module_coming(struct module *mod); void klp_module_going(struct module *mod); @@ -249,6 +243,57 @@ struct klp_state *klp_get_prev_state(unsigned long id);
#else /* !CONFIG_LIVEPATCH_FTRACE */
+struct klp_func_node { + struct list_head node; + struct list_head func_stack; + void *old_func; + struct arch_klp_data arch_data; +}; + +static inline +int klp_compare_address(unsigned long pc, unsigned long func_addr, + const char *func_name, unsigned long check_size) +{ + if (pc >= func_addr && pc < func_addr + check_size) { + pr_warn("func %s is in use!\n", func_name); + /* Return -EAGAIN for next retry */ + return -EAGAIN; + } + return 0; +} + +typedef int (*klp_add_func_t)(struct list_head *func_list, + unsigned long func_addr, unsigned long func_size, + const char *func_name, int force); + +struct walk_stackframe_args { + void *data; + int ret; + bool (*check_func)(void *data, int *ret, unsigned long pc); +}; + +#ifndef klp_smp_isb +#define klp_smp_isb() +#endif + +#define KLP_MIGRATION_NAME_PREFIX "migration/" +static inline bool klp_is_migration_thread(const char *task_name) +{ + /* + * current on other CPU + * we call this in stop_machine, so the current + * of each CPUs is migration, just compare the + * task_comm here, because we can't get the + * cpu_curr(task_cpu(t))). This assumes that no + * other thread will pretend to be a stopper via + * task_comm. + */ + return !strncmp(task_name, KLP_MIGRATION_NAME_PREFIX, + sizeof(KLP_MIGRATION_NAME_PREFIX) - 1); +} + +int klp_register_patch(struct klp_patch *patch); +int klp_unregister_patch(struct klp_patch *patch); static inline int klp_module_coming(struct module *mod) { return 0; } static inline void klp_module_going(struct module *mod) {} static inline bool klp_patch_pending(struct task_struct *task) { return false; } diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig index 8ffe3b9ca1f9..d67a3e59a372 100644 --- a/kernel/livepatch/Kconfig +++ b/kernel/livepatch/Kconfig @@ -13,11 +13,12 @@ menu "Enable Livepatch"
config LIVEPATCH bool "Kernel Live Patching" - depends on (HAVE_LIVEPATCH_FTRACE && (DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS)) || HAVE_LIVEPATCH_WO_FTRACE + depends on (HAVE_LIVEPATCH_FTRACE && (DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS)) || (HAVE_LIVEPATCH_WO_FTRACE && DEBUG_INFO) depends on MODULES depends on SYSFS depends on KALLSYMS_ALL depends on !TRIM_UNUSED_KSYMS + default n help Say Y here if you want to support kernel live patching. This option has no runtime impact until a kernel "patch" @@ -37,6 +38,7 @@ choice config LIVEPATCH_FTRACE bool "based on ftrace" depends on HAVE_LIVEPATCH_FTRACE + depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS help Supports kernel live patching based on ftrace. This is the original implementation of kernel live @@ -46,6 +48,8 @@ config LIVEPATCH_FTRACE config LIVEPATCH_WO_FTRACE bool "without ftrace" depends on HAVE_LIVEPATCH_WO_FTRACE + depends on DEBUG_INFO + select LIVEPATCH_STOP_MACHINE_CONSISTENCY help Supports kernel live patching without ftrace. This solution will patch the first few instructions @@ -57,4 +61,19 @@ config LIVEPATCH_WO_FTRACE
endchoice
+config LIVEPATCH_STOP_MACHINE_CONSISTENCY + bool "Stop machine consistency" + depends on LIVEPATCH_WO_FTRACE + help + Use stop machine consistency model + stop-machine consistency and kpatch's stack + trace checking. + +config LIVEPATCH_STACK + bool "Enforcing the patch stacking principle" + depends on LIVEPATCH_WO_FTRACE + default y + help + Say N here if you want to remove the patch stacking principle. + endmenu diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 844c0bd4e3e1..c01bf3529284 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -4,6 +4,7 @@ * * Copyright (C) 2014 Seth Jennings sjenning@redhat.com * Copyright (C) 2014 SUSE + * Copyright (C) 2023 Huawei Inc. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -26,6 +27,10 @@ #include "patch.h" #include "state.h" #include "transition.h" +#else /* !CONFIG_LIVEPATCH_FTRACE */ +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/stop_machine.h> #endif /* CONFIG_LIVEPATCH_FTRACE */
/* @@ -53,6 +58,7 @@ static bool klp_is_module(struct klp_object *obj) return obj->name; }
+#ifdef CONFIG_LIVEPATCH_FTRACE /* sets obj->mod if object is not vmlinux and module is found */ static void klp_find_object_module(struct klp_object *obj) { @@ -80,6 +86,9 @@ static void klp_find_object_module(struct klp_object *obj)
rcu_read_unlock_sched(); } +#else /* !CONFIG_LIVEPATCH_FTRACE */ +static int klp_find_object_module(struct klp_object *obj); +#endif /* CONFIG_LIVEPATCH_FTRACE */
static bool klp_initialized(void) { @@ -407,16 +416,8 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
#else /* !CONFIG_LIVEPATCH_FTRACE */
-static int __klp_enable_patch(struct klp_patch *patch) -{ - return -EINVAL; -} - static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, - const char *buf, size_t count) -{ - return -EINVAL; -} + const char *buf, size_t count);
#endif /* CONFIG_LIVEPATCH_FTRACE */
@@ -711,6 +712,12 @@ static void __klp_free_objects(struct klp_patch *patch, bool nops_only) struct klp_object *obj, *tmp_obj;
klp_for_each_object_safe(patch, obj, tmp_obj) { +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + if (klp_is_module(obj) && obj->mod) { + module_put(obj->mod); + obj->mod = NULL; + } +#endif __klp_free_funcs(obj, nops_only); #ifdef CONFIG_LIVEPATCH_FTRACE if (nops_only && !obj->dynamic) @@ -910,11 +917,25 @@ static int klp_init_object_loaded(struct klp_patch *patch,
ret = kallsyms_lookup_size_offset((unsigned long)func->old_func, &func->old_size, NULL); +#ifdef CONFIG_LIVEPATCH_FTRACE if (!ret) { pr_err("kallsyms size lookup failed for '%s'\n", func->old_name); return -ENOENT; } +#else /* !CONFIG_LIVEPATCH_FTRACE */ + if (!ret || ((long)func->old_size < 0)) { + pr_err("kallsyms size lookup failed for '%s'\n", + func->old_name); + return -ENOENT; + } + if (func->old_size < KLP_MAX_REPLACE_SIZE) { + pr_err("%s size less than limit (%lu < %zu)\n", func->old_name, + func->old_size, KLP_MAX_REPLACE_SIZE); + return -EINVAL; + } +#endif /* CONFIG_LIVEPATCH_FTRACE */ + #ifdef CONFIG_LIVEPATCH_FTRACE if (func->nop) func->new_func = func->old_func; @@ -931,6 +952,7 @@ static int klp_init_object_loaded(struct klp_patch *patch, return 0; }
+#ifdef CONFIG_LIVEPATCH_FTRACE static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) { struct klp_func *func; @@ -961,12 +983,18 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
return ret; } +#else /* !CONFIG_LIVEPATCH_FTRACE */ +static int klp_init_object(struct klp_patch *patch, struct klp_object *obj); +#endif /* CONFIG_LIVEPATCH_FTRACE */
static void klp_init_func_early(struct klp_object *obj, struct klp_func *func) { kobject_init(&func->kobj, &klp_ktype_func); list_add_tail(&func->node, &obj->func_list); +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + func->func_node = NULL; +#endif }
static void klp_init_object_early(struct klp_patch *patch, @@ -975,6 +1003,9 @@ static void klp_init_object_early(struct klp_patch *patch, INIT_LIST_HEAD(&obj->func_list); kobject_init(&obj->kobj, &klp_ktype_object); list_add_tail(&obj->node, &patch->obj_list); +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + obj->mod = NULL; +#endif }
static void klp_init_patch_early(struct klp_patch *patch) @@ -1116,7 +1147,6 @@ static int __klp_enable_patch(struct klp_patch *patch) klp_cancel_transition(); return ret; } -#endif /* CONFIG_LIVEPATCH_FTRACE */
/** * klp_enable_patch() - enable the livepatch @@ -1200,7 +1230,6 @@ int klp_enable_patch(struct klp_patch *patch) } EXPORT_SYMBOL_GPL(klp_enable_patch);
-#ifdef CONFIG_LIVEPATCH_FTRACE /* * This function unpatches objects from the replaced livepatches. * @@ -1384,7 +1413,6 @@ void klp_module_going(struct module *mod)
mutex_unlock(&klp_mutex); } -#endif /* CONFIG_LIVEPATCH_FTRACE */
static int __init klp_init(void) { @@ -1395,4 +1423,941 @@ static int __init klp_init(void) return 0; }
+#else /* !CONFIG_LIVEPATCH_FTRACE */ + +struct patch_data { + struct klp_patch *patch; + atomic_t cpu_count; +}; + +static bool klp_is_patch_registered(struct klp_patch *patch) +{ + struct klp_patch *mypatch; + + list_for_each_entry(mypatch, &klp_patches, list) + if (mypatch == patch) + return true; + + return false; +} + +static int state_show(struct seq_file *m, void *v) +{ + struct klp_patch *patch; + char *state; + int index = 0; + + seq_printf(m, "%-5s\t%-26s\t%-8s\n", "Index", "Patch", "State"); + seq_puts(m, "-----------------------------------------------\n"); + mutex_lock(&klp_mutex); + list_for_each_entry(patch, &klp_patches, list) { + if (patch->enabled) + state = "enabled"; + else + state = "disabled"; + + seq_printf(m, "%-5d\t%-26s\t%-8s\n", ++index, + patch->mod->name, state); + } + mutex_unlock(&klp_mutex); + seq_puts(m, "-----------------------------------------------\n"); + + return 0; +} + +static int klp_state_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, state_show, NULL); +} + +static const struct proc_ops proc_klpstate_operations = { + .proc_open = klp_state_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int klp_find_object_module(struct klp_object *obj) +{ + struct module *mod; + + if (!klp_is_module(obj)) + return 0; + + rcu_read_lock_sched(); + /* + * We do not want to block removal of patched modules and therefore + * we do not take a reference here. The patches are removed by + * klp_module_going() instead. + */ + mod = find_module(obj->name); + if (!mod) { + pr_err("module '%s' not loaded\n", obj->name); + rcu_read_unlock_sched(); + return -ENOPKG; /* the deponds module is not loaded */ + } + + if (mod->state == MODULE_STATE_COMING || !try_module_get(mod)) { + rcu_read_unlock_sched(); + return -EINVAL; + } + + obj->mod = mod; + + rcu_read_unlock_sched(); + return 0; +} + +static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) +{ + struct klp_func *func; + int ret; + const char *name; + + if (klp_is_module(obj) && strnlen(obj->name, MODULE_NAME_LEN) >= MODULE_NAME_LEN) { + pr_err("obj name is too long\n"); + return -EINVAL; + } + klp_for_each_func(obj, func) { + if (!func->old_name) { + pr_err("old name is invalid\n"); + return -EINVAL; + } + /* + * NOPs get the address later. The patched module must be loaded, + * see klp_init_object_loaded(). + */ + if (!func->new_func && !func->nop) { + pr_err("new_func is invalid\n"); + return -EINVAL; + } + if (strlen(func->old_name) >= KSYM_NAME_LEN) { + pr_err("function old name is too long\n"); + return -EINVAL; + } + } + + obj->patched = false; + obj->mod = NULL; + + ret = klp_find_object_module(obj); + if (ret) + return ret; + + name = klp_is_module(obj) ? obj->name : "vmlinux"; + ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name); + if (ret) + goto out; + + /* + * For livepatch without ftrace, we need to modify the first N + * instructions of the to-be-patched func. So should check if the + * func length enough to allow this modification. + * + * We add check hook in klp_init_func and will using the old_size + * internally, so the klp_init_object_loaded should called first + * to fill the klp_func struct. 
+ */ + if (klp_is_object_loaded(obj)) { + ret = klp_init_object_loaded(patch, obj); + if (ret) + goto out; + } + + klp_for_each_func(obj, func) { + ret = klp_init_func(obj, func); + if (ret) + goto out; + } + + return 0; + +out: + if (klp_is_module(obj)) { + module_put(obj->mod); + obj->mod = NULL; + } + return ret; +} + +int __weak arch_klp_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) +{ + return -EINVAL; +} + +bool __weak arch_check_jump_insn(unsigned long func_addr) +{ + return true; +} + +int __weak arch_klp_check_activeness_func(struct klp_func *func, int enable, + klp_add_func_t add_func, + struct list_head *func_list) +{ + int ret; + unsigned long func_addr, func_size; + struct klp_func_node *func_node = NULL; + + func_node = func->func_node; + /* Check func address in stack */ + if (enable) { + if (func->patched || func->force == KLP_ENFORCEMENT) + return 0; + /* + * When enable, checking the currently active functions. + */ + if (list_empty(&func_node->func_stack)) { + /* + * Not patched on this function [the origin one] + */ + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + } else { + /* + * Previously patched function [the active one] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + /* + * When preemption is disabled and the replacement area + * does not contain a jump instruction, the migration + * thread is scheduled to run stop machine only after the + * execution of instructions to be replaced is complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + (func->force == KLP_NORMAL_FORCE) || + arch_check_jump_insn(func_addr)) { + ret = add_func(func_list, func_addr, func_size, + func->old_name, func->force); + if (ret) + return ret; + } + } else { +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement instructions. Therefore, + * when preemption is not enabled, atomic execution is performed + * and these instructions will not appear on the stack. 
+ */ + if (list_is_singular(&func_node->func_stack)) { + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + } else { + struct klp_func *prev; + + prev = list_first_or_null_rcu( + &func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; +#endif + + func_addr = (unsigned long)func->new_func; + func_size = func->new_size; + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + } + return 0; +} + +static inline unsigned long klp_size_to_check(unsigned long func_size, + int force) +{ + unsigned long size = func_size; + + if (force == KLP_STACK_OPTIMIZE && size > KLP_MAX_REPLACE_SIZE) + size = KLP_MAX_REPLACE_SIZE; + return size; +} + +struct actv_func { + struct list_head list; + unsigned long func_addr; + unsigned long func_size; + const char *func_name; + int force; +}; + +static bool check_func_list(void *data, int *ret, unsigned long pc) +{ + struct list_head *func_list = (struct list_head *)data; + struct actv_func *func = NULL; + + list_for_each_entry(func, func_list, list) { + *ret = klp_compare_address(pc, func->func_addr, func->func_name, + klp_size_to_check(func->func_size, func->force)); + if (*ret) + return false; + } + return true; +} + +static int add_func_to_list(struct list_head *func_list, unsigned long func_addr, + unsigned long func_size, const char *func_name, + int force) +{ + struct actv_func *func = kzalloc(sizeof(struct actv_func), GFP_ATOMIC); + + if (!func) + return -ENOMEM; + func->func_addr = func_addr; + func->func_size = func_size; + func->func_name = func_name; + func->force = force; + list_add_tail(&func->list, func_list); + return 0; +} + +static void free_func_list(struct list_head *func_list) +{ + struct actv_func *func = NULL; + struct actv_func *tmp = NULL; + + list_for_each_entry_safe(func, tmp, func_list, list) { + list_del(&func->list); + kfree(func); + } +} + +static int klp_check_activeness_func(struct klp_patch *patch, int enable, + struct list_head *func_list) +{ + int ret; + struct klp_object *obj = NULL; + struct klp_func *func = NULL; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + ret = arch_klp_check_activeness_func(func, enable, + add_func_to_list, + func_list); + if (ret) + return ret; + } + } + return 0; +} + +static int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + LIST_HEAD(func_list); + + ret = klp_check_activeness_func(patch, enable, &func_list); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + + if (list_empty(&func_list)) + goto out; + + ret = arch_klp_check_calltrace(check_func_list, (void *)&func_list); + +out: + free_func_list(&func_list); + return ret; +} + +static LIST_HEAD(klp_func_list); + +/* + * The caller must ensure that the klp_mutex lock is held or is in the rcu read + * critical area. 
+ */ +static struct klp_func_node *klp_find_func_node(const void *old_func) +{ + struct klp_func_node *func_node; + + list_for_each_entry_rcu(func_node, &klp_func_list, node, + lockdep_is_held(&klp_mutex)) { + if (func_node->old_func == old_func) + return func_node; + } + + return NULL; +} + +static void klp_add_func_node(struct klp_func_node *func_node) +{ + list_add_rcu(&func_node->node, &klp_func_list); +} + +static void klp_del_func_node(struct klp_func_node *func_node) +{ + list_del_rcu(&func_node->node); +} + +void __weak *arch_klp_mem_alloc(size_t size) +{ + return kzalloc(size, GFP_ATOMIC); +} + +void __weak arch_klp_mem_free(void *mem) +{ + kfree(mem); +} + +long __weak arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) +{ + return -EINVAL; +} + +static struct klp_func_node *func_node_alloc(struct klp_func *func) +{ + long ret; + struct klp_func_node *func_node = NULL; + + func_node = klp_find_func_node(func->old_func); + if (func_node) /* The old_func has ever been patched */ + return func_node; + func_node = arch_klp_mem_alloc(sizeof(struct klp_func_node)); + if (func_node) { + INIT_LIST_HEAD(&func_node->func_stack); + func_node->old_func = func->old_func; + /* + * Module which contains 'old_func' would not be removed because + * it's reference count has been held during registration. + * But it's not in stop_machine context here, 'old_func' should + * not be modified as saving old code. + */ + ret = arch_klp_save_old_code(&func_node->arch_data, func->old_func); + if (ret) { + arch_klp_mem_free(func_node); + pr_err("save old code failed, ret=%ld\n", ret); + return NULL; + } + klp_add_func_node(func_node); + } + return func_node; +} + +static void func_node_free(struct klp_func *func) +{ + struct klp_func_node *func_node; + + func_node = func->func_node; + if (func_node) { + func->func_node = NULL; + if (list_empty(&func_node->func_stack)) { + klp_del_func_node(func_node); + synchronize_rcu(); + arch_klp_mem_free(func_node); + } + } +} + +static void klp_mem_recycle(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + func_node_free(func); + } + } +} + +static int klp_mem_prepare(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + func->func_node = func_node_alloc(func); + if (func->func_node == NULL) { + klp_mem_recycle(patch); + pr_err("alloc func_node failed\n"); + return -ENOMEM; + } + } + } + return 0; +} + +void __weak arch_klp_unpatch_func(struct klp_func *func) +{ +} + +int __weak arch_klp_patch_func(struct klp_func *func) +{ + return -EINVAL; +} + +static void klp_unpatch_func(struct klp_func *func) +{ + if (WARN_ON(!func->patched)) + return; + if (WARN_ON(!func->old_func)) + return; + if (WARN_ON(!func->func_node)) + return; + + arch_klp_unpatch_func(func); + + func->patched = false; +} + +static inline int klp_patch_func(struct klp_func *func) +{ + int ret = 0; + + if (func->patched) + return 0; + if (WARN_ON(!func->old_func)) + return -EINVAL; + if (WARN_ON(!func->func_node)) + return -EINVAL; + + ret = arch_klp_patch_func(func); + if (!ret) + func->patched = true; + + return ret; +} + +static void klp_unpatch_object(struct klp_object *obj) +{ + struct klp_func *func; + + klp_for_each_func(obj, func) { + if (func->patched) + klp_unpatch_func(func); + } + obj->patched = false; +} + +static int klp_patch_object(struct klp_object *obj) +{ + struct 
klp_func *func; + int ret; + + if (obj->patched) + return 0; + + klp_for_each_func(obj, func) { + ret = klp_patch_func(func); + if (ret) { + klp_unpatch_object(obj); + return ret; + } + } + obj->patched = true; + + return 0; +} + +static void klp_unpatch_objects(struct klp_patch *patch) +{ + struct klp_object *obj; + + klp_for_each_object(patch, obj) + if (obj->patched) + klp_unpatch_object(obj); +} + +void __weak arch_klp_code_modify_prepare(void) +{ +} + +void __weak arch_klp_code_modify_post_process(void) +{ +} + +static int klp_stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) +{ + int ret; + + /* + * Cpu hotplug locking is a "percpu" rw semaphore, however write + * lock and read lock on it are globally mutual exclusive, that is + * cpus_write_lock() on one cpu can block all cpus_read_lock() + * on other cpus, vice versa. + * + * Since cpu hotplug take the cpus_write_lock() before text_mutex, + * here take cpus_read_lock() before text_mutex to avoid deadlock. + */ + cpus_read_lock(); + arch_klp_code_modify_prepare(); + ret = stop_machine_cpuslocked(fn, data, cpus); + arch_klp_code_modify_post_process(); + cpus_read_unlock(); + return ret; +} + +static int disable_patch(struct klp_patch *patch) +{ + pr_notice("disabling patch '%s'\n", patch->mod->name); + + klp_unpatch_objects(patch); + patch->enabled = false; + module_put(patch->mod); + return 0; +} + +static int klp_try_disable_patch(void *data) +{ + int ret = 0; + struct patch_data *pd = (struct patch_data *)data; + + if (atomic_inc_return(&pd->cpu_count) == 1) { + struct klp_patch *patch = pd->patch; + + ret = klp_check_calltrace(patch, 0); + if (ret) { + atomic_inc(&pd->cpu_count); + return ret; + } + ret = disable_patch(patch); + if (ret) { + atomic_inc(&pd->cpu_count); + return ret; + } + atomic_inc(&pd->cpu_count); + } else { + while (atomic_read(&pd->cpu_count) <= num_online_cpus()) + cpu_relax(); + + klp_smp_isb(); + } + + return ret; +} + +static int __klp_disable_patch(struct klp_patch *patch) +{ + int ret; + struct patch_data patch_data = { + .patch = patch, + .cpu_count = ATOMIC_INIT(0), + }; + + if (WARN_ON(!patch->enabled)) + return -EINVAL; + +#ifdef CONFIG_LIVEPATCH_STACK + /* enforce stacking: only the last enabled patch can be disabled */ + if (!list_is_last(&patch->list, &klp_patches) && + list_next_entry(patch, list)->enabled) { + pr_err("only the last enabled patch can be disabled\n"); + return -EBUSY; + } +#endif + + ret = klp_stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask); + if (ret) + return ret; + + klp_mem_recycle(patch); + return 0; +} + +/* + * This function is called from stop_machine() context. + */ +static int enable_patch(struct klp_patch *patch) +{ + struct klp_object *obj; + int ret; + + pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n"); + add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK); + + if (!patch->enabled) { + if (!try_module_get(patch->mod)) + return -ENODEV; + + patch->enabled = true; + + pr_notice("enabling patch '%s'\n", patch->mod->name); + } + + klp_for_each_object(patch, obj) { + if (!klp_is_object_loaded(obj)) + continue; + + ret = klp_patch_object(obj); + if (ret) { + pr_warn("failed to patch object '%s'\n", + klp_is_module(obj) ? 
obj->name : "vmlinux"); + goto disable; + } + } + + return 0; + +disable: + disable_patch(patch); + return ret; +} + +static int klp_try_enable_patch(void *data) +{ + int ret = 0; + struct patch_data *pd = (struct patch_data *)data; + + if (atomic_inc_return(&pd->cpu_count) == 1) { + struct klp_patch *patch = pd->patch; + + ret = klp_check_calltrace(patch, 1); + if (ret) { + atomic_inc(&pd->cpu_count); + return ret; + } + ret = enable_patch(patch); + if (ret) { + atomic_inc(&pd->cpu_count); + return ret; + } + atomic_inc(&pd->cpu_count); + } else { + while (atomic_read(&pd->cpu_count) <= num_online_cpus()) + cpu_relax(); + + klp_smp_isb(); + } + + return ret; +} + +static int __klp_enable_patch(struct klp_patch *patch) +{ + int ret; + struct patch_data patch_data = { + .patch = patch, + .cpu_count = ATOMIC_INIT(0), + }; + + if (WARN_ON(patch->enabled)) + return -EINVAL; + +#ifdef CONFIG_LIVEPATCH_STACK + /* enforce stacking: only the first disabled patch can be enabled */ + if (patch->list.prev != &klp_patches && + !list_prev_entry(patch, list)->enabled) { + pr_err("only the first disabled patch can be enabled\n"); + return -EBUSY; + } +#endif + + ret = klp_mem_prepare(patch); + if (ret) + return ret; + + ret = klp_stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask); + if (ret) + goto err_out; + +#ifndef CONFIG_LIVEPATCH_STACK + /* move the enabled patch to the list tail */ + list_del(&patch->list); + list_add_tail(&patch->list, &klp_patches); +#endif + + return 0; + +err_out: + klp_mem_recycle(patch); + return ret; +} + + +static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct klp_patch *patch; + int ret; + bool enabled; + + ret = kstrtobool(buf, &enabled); + if (ret) + return ret; + + patch = container_of(kobj, struct klp_patch, kobj); + + mutex_lock(&klp_mutex); + + if (!klp_is_patch_registered(patch)) { + /* + * Module with the patch could either disappear meanwhile or is + * not properly initialized yet. + */ + ret = -EINVAL; + goto out; + } + + if (patch->enabled == enabled) { + /* already in requested state */ + ret = -EINVAL; + goto out; + } + + if (enabled) + ret = __klp_enable_patch(patch); + else + ret = __klp_disable_patch(patch); + +out: + mutex_unlock(&klp_mutex); + + if (ret) + return ret; + return count; +} + +/** + * klp_register_patch() - registers a patch + * @patch: Patch to be registered + * + * Initializes the data structure associated with the patch and + * creates the sysfs interface. 
+ * + * Return: 0 on success, otherwise error + */ +int klp_register_patch(struct klp_patch *patch) +{ + int ret; + struct klp_object *obj; + + if (!patch) { + pr_err("patch invalid\n"); + return -EINVAL; + } + if (!patch->mod) { + pr_err("patch->mod invalid\n"); + return -EINVAL; + } + if (!patch->objs) { + pr_err("patch->objs invalid\n"); + return -EINVAL; + } + + klp_for_each_object_static(patch, obj) { + if (!obj->funcs) { + pr_err("obj->funcs invalid\n"); + return -EINVAL; + } + } + + if (!is_livepatch_module(patch->mod)) { + pr_err("module %s is not marked as a livepatch module\n", + patch->mod->name); + return -EINVAL; + } + + if (!klp_initialized()) { + pr_err("kernel live patch not available\n"); + return -ENODEV; + } + + mutex_lock(&klp_mutex); + + if (klp_is_patch_registered(patch)) { + mutex_unlock(&klp_mutex); + return -EINVAL; + } + + klp_init_patch_early(patch); + + ret = klp_init_patch(patch); + if (ret) + goto err; + + mutex_unlock(&klp_mutex); + + return 0; + +err: + klp_free_patch_start(patch); + + mutex_unlock(&klp_mutex); + + kobject_put(&patch->kobj); + wait_for_completion(&patch->finish); + + return ret; +} +EXPORT_SYMBOL_GPL(klp_register_patch); + +/** + * klp_unregister_patch() - unregisters a patch + * @patch: Disabled patch to be unregistered + * + * Frees the data structures and removes the sysfs interface. + * + * Return: 0 on success, otherwise error + */ +int klp_unregister_patch(struct klp_patch *patch) +{ + int ret = 0; + + mutex_lock(&klp_mutex); + + if (!klp_is_patch_registered(patch)) { + ret = -EINVAL; + goto out; + } + + if (patch->enabled) { + ret = -EBUSY; + goto out; + } + + klp_free_patch_start(patch); + + mutex_unlock(&klp_mutex); + + kobject_put(&patch->kobj); + wait_for_completion(&patch->finish); + + return 0; +out: + mutex_unlock(&klp_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(klp_unregister_patch); + +static int __init klp_init(void) +{ + struct proc_dir_entry *root_klp_dir, *res; + + root_klp_dir = proc_mkdir("livepatch", NULL); + if (!root_klp_dir) + goto error_out; + + res = proc_create("livepatch/state", 0, NULL, + &proc_klpstate_operations); + if (!res) + goto error_remove; + + klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj); + if (!klp_root_kobj) + goto error_remove_state; + + return 0; + +error_remove_state: + remove_proc_entry("livepatch/state", NULL); +error_remove: + remove_proc_entry("livepatch", NULL); +error_out: + return -ENOMEM; +} + +#endif /* CONFIG_LIVEPATCH_FTRACE */ + module_init(klp_init);