hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Kprobes and livepatch_wo may modify the first several instructions of a function at the same time, causing a conflict. Since dynamic ftrace reserves instructions at the entry of non-notrace functions, we can let kprobes work on the reserved instructions and livepatch_wo work on the other instructions, so the conflict is avoided. Note, however, that when a function is marked 'notrace' and therefore has no reserved instructions, we still do not allow both to modify the same instruction.
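For illustration, on x86_64 with dynamic ftrace the reserved instructions are the MCOUNT_INSN_SIZE (5) bytes at function entry, normally a call to __fentry__ or a 5-byte nop (a rough sketch only; sizes, encodings and the first real instruction differ per architecture and build):

	<foo>:
	foo+0x0:  call __fentry__ / 5-byte nop   <- reserved by ftrace; kprobes may use this
	foo+0x5:  <first real instruction>       <- livepatch_wo only patches from here on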
The lock order chosen to prevent deadlocks is: kprobe_mutex -> klp_mutex -> cpus_read_lock -> text_mutex
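Both entry points then nest the locks in that order; a condensed sketch of the two code paths changed below (not the literal code, error paths omitted):

	/* Livepatch side: enabled_store() */
	kprobes_lock();				/* kprobe_mutex */
	mutex_lock(&klp_mutex);
	/* the enable/disable path takes cpus_read_lock -> text_mutex */
	mutex_unlock(&klp_mutex);
	kprobes_unlock();

	/* Kprobe side: register_kprobe() */
	mutex_lock(&kprobe_mutex);
	klp_lock();				/* klp_mutex */
	ret = klp_check_patched((unsigned long)p->addr);
	/* ... registration ... */
	klp_unlock();
	mutex_unlock(&kprobe_mutex);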
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 include/linux/kprobes.h   |  8 ++++++++
 include/linux/livepatch.h | 13 +++++++++++++
 kernel/kprobes.c          | 25 ++++++++++++++++++++++++-
 kernel/livepatch/Kconfig  | 14 ++++++++++++++
 kernel/livepatch/core.c   | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 5 files changed, 120 insertions(+), 2 deletions(-)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 18b7c40ffb37..e2713fe9c100 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -496,6 +496,14 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr)
 }
 #endif
 
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+void kprobes_lock(void);
+void kprobes_unlock(void);
+#else /* !CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+static inline void kprobes_lock(void) { }
+static inline void kprobes_unlock(void) { }
+#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+
 /* Returns true if kprobes handled the fault */
 static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
 					      unsigned int trap)
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index a5ef153bae8d..ad402ea7a3e4 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -364,4 +364,17 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
 
 #endif /* CONFIG_LIVEPATCH */
 
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+void klp_lock(void);
+void klp_unlock(void);
+int klp_check_patched(unsigned long addr);
+#else /* !CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+static inline void klp_lock(void) { }
+static inline void klp_unlock(void) { }
+static inline int klp_check_patched(unsigned long addr)
+{
+	return 0;
+}
+#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+
 #endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f29d7cc3eb01..f83b8b6b829f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -42,7 +42,9 @@
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
 #include <linux/uaccess.h>
-
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+#include <linux/livepatch.h>
+#endif
 #define KPROBE_HASH_BITS 6
 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
@@ -1695,6 +1697,18 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	return ret;
 }
 
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+void kprobes_lock(void)
+{
+	mutex_lock(&kprobe_mutex);
+}
+
+void kprobes_unlock(void)
+{
+	mutex_unlock(&kprobe_mutex);
+}
+#endif
+
 int register_kprobe(struct kprobe *p)
 {
 	int ret;
@@ -1722,6 +1736,12 @@ int register_kprobe(struct kprobe *p)
 		return ret;
 
 	mutex_lock(&kprobe_mutex);
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+	klp_lock();
+	ret = klp_check_patched((unsigned long)p->addr);
+	if (ret)
+		goto out;
+#endif
 
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
@@ -1755,6 +1775,9 @@ int register_kprobe(struct kprobe *p)
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
 out:
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+	klp_unlock();
+#endif
 	mutex_unlock(&kprobe_mutex);
 
 	if (probed_mod)
diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
index a59cbb6506cb..55f8ba227e6b 100644
--- a/kernel/livepatch/Kconfig
+++ b/kernel/livepatch/Kconfig
@@ -109,5 +109,19 @@ config LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE
 	  service downtime.
 	  Say N if you are unsure.
+config LIVEPATCH_ISOLATE_KPROBE
+	bool "Isolating livepatch and kprobe"
+	depends on LIVEPATCH_RESTRICT_KPROBE
+	depends on DYNAMIC_FTRACE && (X86_64 || ARM64)
+	default n
+	help
+	  Kprobes and livepatch_wo may modify the first several instructions
+	  of a function at the same time, causing a conflict. Since dynamic
+	  ftrace reserves instructions at non-notrace functions, we can let
+	  kprobes work on the reserved instructions and livepatch_wo work on
+	  the other instructions to avoid the conflict. Note that when a
+	  function is marked 'notrace' and has no reserved instructions,
+	  both are still not allowed to modify the same instruction.
+
 endmenu
 endif
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index fdf00f2a6d84..3f3fa1c3c80c 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -97,6 +97,42 @@ static inline struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch)
 }
 #endif /* CONFIG_LIVEPATCH_RESTRICT_KPROBE */
 
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+void klp_lock(void)
+{
+	mutex_lock(&klp_mutex);
+}
+
+void klp_unlock(void)
+{
+	mutex_unlock(&klp_mutex);
+}
+
+int klp_check_patched(unsigned long addr)
+{
+	struct klp_patch *patch;
+	struct klp_object *obj;
+	struct klp_func *func;
+
+	lockdep_assert_held(&klp_mutex);
+	list_for_each_entry(patch, &klp_patches, list) {
+		if (!patch->enabled)
+			continue;
+		klp_for_each_object(patch, obj) {
+			klp_for_each_func(obj, func) {
+				unsigned long old_func = (unsigned long)func->old_func;
+
+				if (addr >= old_func && addr < old_func + func->old_size) {
+					pr_err("func %pS has been livepatched\n", (void *)addr);
+					return -EINVAL;
+				}
+			}
+		}
+	}
+	return 0;
+}
+#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */
+
 static bool klp_is_module(struct klp_object *obj)
 {
 	return obj->name;
@@ -483,6 +519,9 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 	patch = container_of(kobj, struct klp_patch, kobj);
 
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+	kprobes_lock();
+#endif
 	mutex_lock(&klp_mutex);
 	if (!klp_is_patch_registered(patch)) {
@@ -507,7 +546,9 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 out:
 	mutex_unlock(&klp_mutex);
-
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+	kprobes_unlock();
+#endif
 	if (ret)
 		return ret;
 	return count;
@@ -1028,6 +1069,11 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 		module_enable_ro(patch->mod, true);
 
 	klp_for_each_func(obj, func) {
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+		unsigned long old_func;
+		unsigned long ftrace_loc;
+#endif
+
 		ret = klp_find_object_symbol(obj->name, func->old_name,
 					     func->old_sympos,
 					     (unsigned long *)&func->old_func);
@@ -1041,6 +1087,20 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 				func->old_name);
 			return -ENOENT;
 		}
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE
+		old_func = (unsigned long)func->old_func;
+		ftrace_loc = ftrace_location_range(old_func, old_func + func->old_size - 1);
+		if (ftrace_loc) {
+			if (WARN_ON(ftrace_loc < old_func ||
+				    ftrace_loc >= old_func + func->old_size - MCOUNT_INSN_SIZE)) {
+				pr_err("ftrace location for '%s' invalid", func->old_name);
+				return -EINVAL;
+			}
+			func->old_func = (void *)(ftrace_loc + MCOUNT_INSN_SIZE);
+			func->old_size -= ((unsigned long)func->old_func - old_func);
+		}
+#endif
+
 #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
 		if (func->old_size < KLP_MAX_REPLACE_SIZE) {
 			pr_err("%s size less than limit (%lu < %zu)\n", func->old_name,
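
The net effect of the klp_init_object_loaded() hunk above, in plain arithmetic: when ftrace_location_range() reports a reserved instruction at ftrace_loc inside [old_func, old_func + old_size), the range that livepatch_wo may patch is shrunk to begin past it:

	old_func' = ftrace_loc + MCOUNT_INSN_SIZE
	old_size' = old_size - (old_func' - old_func)

Kprobes can then keep using [old_func, old_func') while livepatch_wo only ever writes within [old_func', old_func + old_size). For example (illustrative values), with the 5-byte fentry call at foo+0x0 on x86_64, ftrace_loc == old_func and livepatch_wo starts patching at foo+0x5.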