hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IA76NX
CVE: NA
--------------------------------
Kprobe and livepatch_wo may modify the first several instructions of a function at the same time, which causes a conflict. Since dynamic ftrace reserves instructions at the start of non-notrace functions, we can let kprobes work on the reserved instructions and livepatch_wo work on the instructions after them, so the two never touch the same bytes. Note that when a function is marked 'notrace' and therefore has no reserved instructions, we still do not allow both to modify the same instruction.
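For illustration, the effect on each patched function is roughly the following (a simplified sketch of the klp_init_object_loaded() hunk below; error handling and the space check are omitted):

  /*
   * Advance the livepatch target past the ftrace-reserved bytes so a
   * kprobe on the fentry site and the livepatch jump cannot overlap.
   */
  old_func = (unsigned long)func->old_func;
  ftrace_loc = klp_ftrace_location(old_func, func->old_size);
  if (ftrace_loc) {
          /* livepatch now starts right after the fentry call site */
          func->old_func = (void *)(ftrace_loc + MCOUNT_INSN_SIZE);
          func->old_size -= (unsigned long)func->old_func - old_func;
  }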
Lock ordering chosen to prevent deadlocks: kprobe_mutex -> klp_mutex -> cpus_read_lock -> text_mutex
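For example (a condensed view of the hunks below, not additional code), both entry paths nest the first two locks in that order:

  /* livepatch side, in enabled_store(): */
  kprobes_lock();                  /* kprobe_mutex */
  mutex_lock(&klp_mutex);
  ...
  mutex_unlock(&klp_mutex);
  kprobes_unlock();

  /* kprobe side, in register_kprobe(): */
  mutex_lock(&kprobe_mutex);
  klp_lock();                      /* klp_mutex */
  ret = klp_check_patched((unsigned long)p->addr);
  ...
  klp_unlock();
  mutex_unlock(&kprobe_mutex);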
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm64/kernel/livepatch.c | 11 +++++
 arch/x86/kernel/livepatch.c   | 18 +++++++
 include/linux/kprobes.h       |  8 ++++
 include/linux/livepatch.h     | 13 ++++++
 kernel/kprobes.c              | 25 +++++++++-
 kernel/livepatch/Kconfig      | 14 ++++++
 kernel/livepatch/core.c       | 88 +++++++++++++++++++++++++++++++++++
 7 files changed, 176 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index 1e47a66b2f2b..2fd025102975 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -288,3 +288,14 @@ void arch_klp_unpatch_func(struct klp_func *func)
 		do_patch(pc, (unsigned long)next_func->new_func);
 	}
 }
+
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+unsigned long arch_klp_fentry_range_size(void)
+{
+	/*
+	 * If the first instruction is not 'bti', fentry is expected at
+	 * the second instruction, otherwise at the third.
+	 */
+	return 3 * AARCH64_INSN_SIZE;
+}
+#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index f560f8b8b1a8..9ebb30a479f2 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
@@ -374,3 +374,21 @@ void arch_klp_unpatch_func(struct klp_func *func)
 	/* replace the text with the new text */
 	klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE);
 }
+
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+unsigned long arch_klp_fentry_range_size(void)
+{
+	unsigned long size = MCOUNT_INSN_SIZE;
+
+	/*
+	 * If CONFIG_X86_KERNEL_IBT is enabled, there is an 'endbr64' at
+	 * the function start, so it must be counted into the range
+	 * size.
+	 */
+#ifdef CONFIG_X86_KERNEL_IBT
+	size += ENDBR_INSN_SIZE;
+#endif
+	/* Expect fentry at the first instruction. */
+	return size;
+}
+#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 8de5d51a0b5e..f79772c5d4b3 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -575,6 +575,14 @@ unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
 }
 #endif

+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+void kprobes_lock(void);
+void kprobes_unlock(void);
+#else /* !CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+static inline void kprobes_lock(void) { }
+static inline void kprobes_unlock(void) { }
+#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+
 /* Returns true if kprobes handled the fault */
 static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
 					      unsigned int trap)
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index f04f96b99d50..b82183f21bc9 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -369,4 +369,17 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,

 #endif /* CONFIG_LIVEPATCH */

+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+void klp_lock(void);
+void klp_unlock(void);
+int klp_check_patched(unsigned long addr);
+#else /* !CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+static inline void klp_lock(void) { }
+static inline void klp_unlock(void) { }
+static inline int klp_check_patched(unsigned long addr)
+{
+	return 0;
+}
+#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+
 #endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c2841e595713..a83ae2bb889f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -44,7 +44,9 @@
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
 #include <linux/uaccess.h>
-
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+#include <linux/livepatch.h>
+#endif
 #define KPROBE_HASH_BITS	6
 #define KPROBE_TABLE_SIZE	(1 << KPROBE_HASH_BITS)

@@ -1616,6 +1618,18 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	return ret;
 }

+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+void kprobes_lock(void)
+{
+	mutex_lock(&kprobe_mutex);
+}
+
+void kprobes_unlock(void)
+{
+	mutex_unlock(&kprobe_mutex);
+}
+#endif
+
 int register_kprobe(struct kprobe *p)
 {
 	int ret;
@@ -1644,6 +1658,12 @@ int register_kprobe(struct kprobe *p)
 		return ret;

 	mutex_lock(&kprobe_mutex);
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+	klp_lock();
+	ret = klp_check_patched((unsigned long)p->addr);
+	if (ret)
+		goto out;
+#endif

 	if (on_func_entry)
 		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
@@ -1680,6 +1700,9 @@ int register_kprobe(struct kprobe *p)
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
 out:
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+	klp_unlock();
+#endif
 	mutex_unlock(&kprobe_mutex);

 	if (probed_mod)
diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
index ad15685dfd53..239480682c92 100644
--- a/kernel/livepatch/Kconfig
+++ b/kernel/livepatch/Kconfig
@@ -87,4 +87,18 @@ config LIVEPATCH_RESTRICT_KPROBE
 	  and vice versa.
 	  Say Y here if you want to check those.

+config LIVEPATCH_ISOLATE_KPROBE
+	bool "Isolating livepatch and kprobe"
+	depends on LIVEPATCH_RESTRICT_KPROBE
+	depends on DYNAMIC_FTRACE && (X86_64 || ARM64)
+	default n
+	help
+	  Kprobe and livepatch_wo may modify the first several instructions
+	  of a function at the same time, which causes a conflict. Since
+	  dynamic ftrace reserves instructions in non-notrace functions, we
+	  can let kprobes work on the reserved instructions and livepatch_wo
+	  work on the other instructions so as to avoid the conflict. Note
+	  that when a function is marked 'notrace' and has no reserved
+	  instructions, both still must not modify the same instruction.
+
 endmenu
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index a4f6955c1c5a..bb7ecc516b4a 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -951,6 +951,32 @@ static void klp_clear_object_relocs(struct klp_patch *patch,
 }
 #endif /* CONFIG_LIVEPATCH_FTRACE */

+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+unsigned long __weak arch_klp_fentry_range_size(void)
+{
+	return 0;
+}
+
+static unsigned long klp_ftrace_location(unsigned long func_addr,
+					 unsigned long func_size)
+{
+	unsigned long loc;
+	unsigned long range_size = arch_klp_fentry_range_size();
+
+	/*
+	 * An arch that does not need to modify code after fentry need
+	 * not override the weak arch_klp_fentry_range_size(), which
+	 * returns 0, so just return here in that case.
+	 */
+	if (range_size == 0 || range_size > func_size)
+		return 0;
+	loc = ftrace_location_range(func_addr, func_addr + range_size);
+	if (WARN_ON(loc && (loc < func_addr || loc >= func_addr + range_size)))
+		loc = 0;
+	return loc;
+}
+#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+
 /* parts of the initialization that is done only when the object is loaded */
 static int klp_init_object_loaded(struct klp_patch *patch,
 				  struct klp_object *obj)
@@ -975,6 +1001,11 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 	klp_module_enable_ro(patch->mod, true);

 	klp_for_each_func(obj, func) {
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+		unsigned long old_func;
+		unsigned long ftrace_loc;
+#endif
+
 		ret = klp_find_object_symbol(obj->name, func->old_name,
 					     func->old_sympos,
 					     (unsigned long *)&func->old_func);
@@ -1005,6 +1036,21 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 				func->old_name);
 			return -ENOENT;
 		}
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+		old_func = (unsigned long)func->old_func;
+		ftrace_loc = klp_ftrace_location(old_func, func->old_size);
+		if (ftrace_loc) {
+			if (ftrace_loc >= old_func &&
+			    ftrace_loc < old_func + func->old_size - MCOUNT_INSN_SIZE) {
+				func->old_func = (void *)(ftrace_loc + MCOUNT_INSN_SIZE);
+				func->old_size -= ((unsigned long)func->old_func - old_func);
+			} else {
+				pr_warn("space not enough after ftrace location in '%s'\n",
+					func->old_name);
+			}
+		}
+#endif
+
 		if (func->old_size < KLP_MAX_REPLACE_SIZE) {
 			pr_err("%s size less than limit (%lu < %zu)\n", func->old_name,
 			       func->old_size, KLP_MAX_REPLACE_SIZE);
@@ -2312,6 +2358,42 @@ static inline struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch)
 }
 #endif /* CONFIG_LIVEPATCH_RESTRICT_KPROBE */

+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+void klp_lock(void)
+{
+	mutex_lock(&klp_mutex);
+}
+
+void klp_unlock(void)
+{
+	mutex_unlock(&klp_mutex);
+}
+
+int klp_check_patched(unsigned long addr)
+{
+	struct klp_patch *patch;
+	struct klp_object *obj;
+	struct klp_func *func;
+
+	lockdep_assert_held(&klp_mutex);
+	list_for_each_entry(patch, &klp_patches, list) {
+		if (!patch->enabled)
+			continue;
+		klp_for_each_object(patch, obj) {
+			klp_for_each_func(obj, func) {
+				unsigned long old_func = (unsigned long)func->old_func;
+
+				if (addr >= old_func && addr < old_func + func->old_size) {
+					pr_err("func %pS has been livepatched\n", (void *)addr);
+					return -EINVAL;
+				}
+			}
+		}
+	}
+	return 0;
+}
+#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+
 void __weak arch_klp_unpatch_func(struct klp_func *func)
 {
 }
@@ -2710,6 +2792,9 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,

 	patch = container_of(kobj, struct klp_patch, kobj);

+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+	kprobes_lock();
+#endif
 	mutex_lock(&klp_mutex);

 	if (!klp_is_patch_registered(patch)) {
@@ -2734,6 +2819,9 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,

 out:
 	mutex_unlock(&klp_mutex);
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+	kprobes_unlock();
+#endif

 	if (ret)
 		return ret;