Link: https://gitee.com/openeuler/kernel/issues/I9R2TB
v1->v2: Fix format warning and a bug in check_task_calltrace() of ppc32 & ppc64
Masami Hiramatsu (3):
  x86/unwind: Recover kretprobe trampoline entry
  x86/unwind: Compile kretprobe fixup code only if CONFIG_KRETPROBES=y
  arm64: Recover kretprobe modified return address in stacktrace
Zheng Yejian (30):
  livepatch: Move 'struct klp_func_list' out of arch
  livepatch/x86: Move 'struct klp_func_list' related codes out of arch
  livepatch/arm: Remove duplicate 'struct klp_func_list' related codes
  livepatch/arm64: Remove duplicate 'struct klp_func_list' related codes
  livepatch/ppc32: Remove duplicate 'struct klp_func_list' related codes
  livepatch/ppc64: Remove duplicate 'struct klp_func_list' related codes
  livepatch/x86: Implement arch_klp_check_task_calltrace()
  livepatch/arm: Implement arch_klp_check_task_calltrace()
  livepatch/arm64: Implement arch_klp_check_task_calltrace()
  livepatch/ppc32: Implement arch_klp_check_task_calltrace()
  livepatch/ppc64: Implement arch_klp_check_task_calltrace()
  livepatch/x86: Adjust instruction replace order for KLP_STACK_OPTIMIZE mode
  livepatch/arm: Adjust instruction replace order for KLP_STACK_OPTIMIZE mode
  livepatch/arm64: Adjust instruction replace order for KLP_STACK_OPTIMIZE mode
  livepatch/ppc32: Adjust instruction replace order for KLP_STACK_OPTIMIZE mode
  livepatch/ppc64: Adjust instruction replace order for KLP_STACK_OPTIMIZE mode
  livepatch/core: No stop machine in KLP_STACK_OPTIMIZE mode
  livepatch: Complete check calltrace for running tasks
  livepatch: Check calltrace of idle tasks
  livepatch: Organize active functions with struct 'list_head'
  livepatch: Fix huge_depth in arch_klp_check_activeness_func()
  livepatch: Use func->func_node directly
  livepatch/core: Make several functions to be static
  livepatch: Fix warning C_RULE_ID_SINGLE_BRANCH_IF_AND_LOOP_BRACKET
  livepatch: Reduce duplicate definition of 'struct walk_stackframe_args'
  ftrace: Fix possible use-after-free issue in ftrace_location()
  kprobes: Fix possible use-after-free issue on kprobe registration
  livepatch: Avoid patching conflicts with kprobes
  kprobes: Add kretprobe_find_ret_addr() for searching return address
  livepatch: Enable CONFIG_LIVEPATCH_ISOLATE_KPROBE in openeuler_defconfig
 arch/arm/include/asm/livepatch.h       |   6 -
 arch/arm/kernel/livepatch.c            | 334 +++++------------
 arch/arm64/configs/openeuler_defconfig |   1 +
 arch/arm64/include/asm/livepatch.h     |   4 -
 arch/arm64/kernel/livepatch.c          | 360 ++++++-------------
 arch/arm64/kernel/stacktrace.c         |   4 +
 arch/powerpc/include/asm/livepatch.h   |   6 +-
 arch/powerpc/kernel/livepatch.c        |  22 ++
 arch/powerpc/kernel/livepatch_32.c     | 366 +++++--------------
 arch/powerpc/kernel/livepatch_64.c     | 434 ++++++++++-------------
 arch/powerpc/kernel/module_64.c        |  29 +-
 arch/x86/configs/openeuler_defconfig   |   1 +
 arch/x86/include/asm/livepatch.h       |   5 -
 arch/x86/include/asm/unwind.h          |  26 ++
 arch/x86/kernel/livepatch.c            | 320 ++++------------
 arch/x86/kernel/unwind_frame.c         |   3 +-
 arch/x86/kernel/unwind_guess.c         |   3 +-
 arch/x86/kernel/unwind_orc.c           |  21 +-
 include/linux/kprobes.h                |  24 ++
 include/linux/livepatch.h              |  30 +-
 kernel/kprobes.c                       |  66 +++-
 kernel/livepatch/Kconfig               |  25 ++
 kernel/livepatch/core.c                | 473 ++++++++++++++++++++++++-
 kernel/trace/ftrace.c                  |  39 +-
 24 files changed, 1241 insertions(+), 1361 deletions(-)
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
We need an interface to check the calltrace of a given task, to determine
whether that task is running in an old function that is about to be patched.

Since klp_check_calltrace() already does almost the same thing, except that
it checks all tasks, refactor it slightly to extract a function like
klp_check_task_calltrace().
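As a rough sketch of the intended split (it mirrors the x86 patch later in
this series; illustrative only, not the final code), the per-task check is
factored out and the all-task path becomes a plain loop over it:

  /*
   * Illustrative sketch: check_task_calltrace() unwinds a single task's
   * stack and runs 'fn' on every PC it finds; do_check_calltrace() just
   * iterates over all tasks. The real per-arch implementations follow in
   * later patches of this series.
   */
  static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long),
  				void *data)
  {
  	struct task_struct *g, *t;
  	int ret;

  	for_each_process_thread(g, t) {
  		if (klp_is_migration_thread(t->comm))
  			continue;
  		ret = check_task_calltrace(t, fn, data);
  		if (ret)
  			return ret;
  	}
  	return 0;
  }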
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm/kernel/livepatch.c        | 8 --------
 arch/arm64/kernel/livepatch.c      | 8 --------
 arch/powerpc/kernel/livepatch_32.c | 8 --------
 arch/powerpc/kernel/livepatch_64.c | 8 --------
 arch/x86/kernel/livepatch.c        | 8 --------
 include/linux/livepatch.h          | 7 +++++++
 6 files changed, 7 insertions(+), 40 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c
index b1711d947dfe..1c27b7b15e3b 100644
--- a/arch/arm/kernel/livepatch.c
+++ b/arch/arm/kernel/livepatch.c
@@ -64,14 +64,6 @@ static bool is_jump_insn(u32 insn)
 	return false;
 }
 
-struct klp_func_list {
-	struct klp_func_list *next;
-	unsigned long func_addr;
-	unsigned long func_size;
-	const char *func_name;
-	int force;
-};
-
 struct walk_stackframe_args {
 	int enable;
 	struct klp_func_list *check_funcs;
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index 5b0171254820..237680e4ac5b 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -57,14 +57,6 @@ static inline bool offset_in_range(unsigned long pc, unsigned long addr,
 	((le32_to_cpu(insn) & 0xfc000000) == 0x94000000) || \
 	((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800))
 
-struct klp_func_list {
-	struct klp_func_list *next;
-	unsigned long func_addr;
-	unsigned long func_size;
-	const char *func_name;
-	int force;
-};
-
 struct walk_stackframe_args {
 	int enable;
 	struct klp_func_list *check_funcs;
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c
index 134b08e12e74..9ad9d92a4422 100644
--- a/arch/powerpc/kernel/livepatch_32.c
+++ b/arch/powerpc/kernel/livepatch_32.c
@@ -54,14 +54,6 @@ static bool is_jump_insn(u32 insn)
 	return false;
 }
 
-struct klp_func_list {
-	struct klp_func_list *next;
-	unsigned long func_addr;
-	unsigned long func_size;
-	const char *func_name;
-	int force;
-};
-
 struct walk_stackframe_args {
 	int enable;
 	struct klp_func_list *check_funcs;
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c
index b33839b5916a..bc9de72f71c3 100644
--- a/arch/powerpc/kernel/livepatch_64.c
+++ b/arch/powerpc/kernel/livepatch_64.c
@@ -59,14 +59,6 @@ static bool is_jump_insn(u32 insn)
 	return false;
 }
 
-struct klp_func_list {
-	struct klp_func_list *next;
-	unsigned long func_addr;
-	unsigned long func_size;
-	const char *func_name;
-	int force;
-};
-
 struct walk_stackframe_args {
 	int enable;
 	struct klp_func_list *check_funcs;
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index 43404fc1fdbb..fa5880d3cc22 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
@@ -52,14 +52,6 @@ static bool is_jump_insn(u8 *insn)
 	return false;
 }
 
-struct klp_func_list {
-	struct klp_func_list *next;
-	unsigned long func_addr;
-	unsigned long func_size;
-	const char *func_name;
-	int force;
-};
-
 static inline unsigned long klp_size_to_check(unsigned long func_size,
 		int force)
 {
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 56ad1c1dd83e..23fb19d74311 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -259,6 +259,13 @@ int klp_compare_address(unsigned long pc, unsigned long func_addr,
 void arch_klp_init(void);
 int klp_module_delete_safety_check(struct module *mod);
 
+struct klp_func_list {
+	struct klp_func_list *next;
+	unsigned long func_addr;
+	unsigned long func_size;
+	const char *func_name;
+	int force;
+};
 #endif
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/x86/include/asm/livepatch.h |  5 --
 arch/x86/kernel/livepatch.c      | 96 ++++----------------------
 include/linux/livepatch.h        |  5 ++
 kernel/livepatch/core.c          | 88 ++++++++++++++++++++++++++++-
 4 files changed, 102 insertions(+), 92 deletions(-)
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index e2cef5b2d8aa..5ffd1de9ce48 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -26,11 +26,6 @@ int arch_klp_patch_func(struct klp_func *func);
 void arch_klp_unpatch_func(struct klp_func *func);
 #endif
 
-#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
-int klp_check_calltrace(struct klp_patch *patch, int enable);
-#endif
-
-
 #if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \
 	defined(CONFIG_LIVEPATCH_WO_FTRACE)
 
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index fa5880d3cc22..1fc4ac526943 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
@@ -52,16 +52,6 @@ static bool is_jump_insn(u8 *insn)
 	return false;
 }
 
-static inline unsigned long klp_size_to_check(unsigned long func_size,
-		int force)
-{
-	unsigned long size = func_size;
-
-	if (force == KLP_STACK_OPTIMIZE && size > JMP_E9_INSN_SIZE)
-		size = JMP_E9_INSN_SIZE;
-	return size;
-}
-
 static bool check_jump_insn(unsigned long func_addr)
 {
 	int len = JMP_E9_INSN_SIZE;
@@ -82,32 +72,8 @@ static bool check_jump_insn(unsigned long func_addr)
 	return false;
 }
 
-static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
-		unsigned long func_addr, unsigned long func_size, const char *func_name,
-		int force)
-{
-	if (*func == NULL) {
-		*funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
-		if (!(*funcs))
-			return -ENOMEM;
-		*func = *funcs;
-	} else {
-		(*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
-				GFP_ATOMIC);
-		if (!(*func)->next)
-			return -ENOMEM;
-		*func = (*func)->next;
-	}
-	(*func)->func_addr = func_addr;
-	(*func)->func_size = func_size;
-	(*func)->func_name = func_name;
-	(*func)->force = force;
-	(*func)->next = NULL;
-	return 0;
-}
-
-static int klp_check_activeness_func(struct klp_patch *patch, int enable,
-		struct klp_func_list **check_funcs)
+int arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
+		klp_add_func_t add_func, struct klp_func_list **func_list)
 {
 	int ret;
 	struct klp_object *obj;
@@ -160,7 +126,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 			if (IS_ENABLED(CONFIG_PREEMPTION) ||
 			    (func->force == KLP_NORMAL_FORCE) ||
 			    check_jump_insn(func_addr)) {
-				ret = add_func_to_list(check_funcs, &pcheck,
+				ret = add_func(func_list, &pcheck,
 						func_addr, func_size,
 						func->old_name, func->force);
 				if (ret)
@@ -200,7 +166,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 					func_addr = (unsigned long)prev->new_func;
 					func_size = prev->new_size;
 				}
-				ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+				ret = add_func(func_list, &pcheck, func_addr,
 						func_size, func->old_name, 0);
 				if (ret)
 					return ret;
@@ -214,7 +180,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 			func_addr = (unsigned long)func->new_func;
 			func_size = func->new_size;
-			ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+			ret = add_func(func_list, &pcheck, func_addr,
 					func_size, func->old_name, 0);
 			if (ret)
 				return ret;
@@ -255,21 +221,6 @@ static void klp_print_stack_trace(void *trace_ptr, int trace_len)
 #endif
 #define MAX_STACK_ENTRIES 100
 
-static bool check_func_list(void *data, int *ret, unsigned long pc)
-{
-	struct klp_func_list *funcs = (struct klp_func_list *)data;
-
-	while (funcs != NULL) {
-		*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
-				klp_size_to_check(funcs->func_size, funcs->force));
-		if (*ret) {
-			return false;
-		}
-		funcs = funcs->next;
-	}
-	return true;
-}
-
 static int klp_check_stack(void *trace_ptr, int trace_len,
 		bool (*fn)(void *, int *, unsigned long), void *data)
 {
@@ -301,17 +252,6 @@ static int klp_check_stack(void *trace_ptr, int trace_len,
 	return 0;
 }
 
-static void free_list(struct klp_func_list **funcs)
-{
-	struct klp_func_list *p;
-
-	while (*funcs != NULL) {
-		p = *funcs;
-		*funcs = (*funcs)->next;
-		kfree(p);
-	}
-}
-
 static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data)
 {
 	struct task_struct *g, *t;
@@ -360,27 +300,6 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data)
 	return 0;
 }
 
-int klp_check_calltrace(struct klp_patch *patch, int enable)
-{
-	int ret = 0;
-	struct klp_func_list *check_funcs = NULL;
-
-	ret = klp_check_activeness_func(patch, enable, &check_funcs);
-	if (ret) {
-		pr_err("collect active functions failed, ret=%d\n", ret);
-		goto out;
-	}
-
-	if (!check_funcs)
-		goto out;
-
-	ret = do_check_calltrace(check_func_list, (void *)check_funcs);
-
-out:
-	free_list(&check_funcs);
-	return ret;
-}
-
 static bool check_module_calltrace(void *data, int *ret, unsigned long pc)
 {
 	struct module *mod = (struct module *)data;
@@ -393,6 +312,11 @@ static bool check_module_calltrace(void *data, int *ret, unsigned long pc)
 	return true;
 }
 
+int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
+{
+	return do_check_calltrace(check_func, data);
+}
+
 int arch_klp_module_check_calltrace(void *data)
 {
 	return do_check_calltrace(check_module_calltrace, data);
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 23fb19d74311..b4cf90c03d29 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -266,6 +266,11 @@ struct klp_func_list {
 	const char *func_name;
 	int force;
 };
+
+typedef int (*klp_add_func_t)(struct klp_func_list **funcs, struct klp_func_list **func,
+		unsigned long func_addr, unsigned long func_size,
+		const char *func_name, int force);
+
 #endif
 
 int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 978dcede41a1..050c75d59f21 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1339,11 +1339,97 @@ static int __klp_disable_patch(struct klp_patch *patch)
 	return 0;
 }
 #elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY)
-int __weak klp_check_calltrace(struct klp_patch *patch, int enable)
+int __weak arch_klp_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data)
+{
+	return -EINVAL;
+}
+
+int __weak arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
+					  klp_add_func_t add_func,
+					  struct klp_func_list **func_list)
+{
+	return -EINVAL;
+}
+
+static inline unsigned long klp_size_to_check(unsigned long func_size,
+		int force)
+{
+	unsigned long size = func_size;
+
+	if (force == KLP_STACK_OPTIMIZE && size > KLP_MAX_REPLACE_SIZE)
+		size = KLP_MAX_REPLACE_SIZE;
+	return size;
+}
+
+static bool check_func_list(void *data, int *ret, unsigned long pc)
 {
+	struct klp_func_list *funcs = (struct klp_func_list *)data;
+
+	while (funcs != NULL) {
+		*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
+				klp_size_to_check(funcs->func_size, funcs->force));
+		if (*ret)
+			return false;
+		funcs = funcs->next;
+	}
+	return true;
+}
+
+static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
+		unsigned long func_addr, unsigned long func_size, const char *func_name,
+		int force)
+{
+	if (*func == NULL) {
+		*funcs = kzalloc(sizeof(**funcs), GFP_ATOMIC);
+		if (!(*funcs))
+			return -ENOMEM;
+		*func = *funcs;
+	} else {
+		(*func)->next = kzalloc(sizeof(**funcs), GFP_ATOMIC);
+		if (!(*func)->next)
+			return -ENOMEM;
+		*func = (*func)->next;
+	}
+	(*func)->func_addr = func_addr;
+	(*func)->func_size = func_size;
+	(*func)->func_name = func_name;
+	(*func)->force = force;
+	(*func)->next = NULL;
 	return 0;
 }
 
+static void free_func_list(struct klp_func_list **funcs)
+{
+	struct klp_func_list *p;
+
+	while (*funcs != NULL) {
+		p = *funcs;
+		*funcs = (*funcs)->next;
+		kfree(p);
+	}
+}
+
+int __weak klp_check_calltrace(struct klp_patch *patch, int enable)
+{
+	int ret = 0;
+	struct klp_func_list *func_list = NULL;
+
+	ret = arch_klp_check_activeness_func(patch, enable, add_func_to_list, &func_list);
+	if (ret) {
+		pr_err("collect active functions failed, ret=%d\n", ret);
+		goto out;
+	}
+
+	if (!func_list)
+		goto out;
+
+	ret = arch_klp_check_calltrace(check_func_list, (void *)func_list);
+
+out:
+	free_func_list(&func_list);
+	return ret;
+}
+
 static LIST_HEAD(klp_func_list);
/*
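
With the common helpers above in core.c, an architecture only has to provide
arch_klp_check_activeness_func() and hand addresses back through the
klp_add_func_t callback; allocation, stack-walk comparison and freeing stay
in common code. A minimal, hypothetical arch implementation could look like
the sketch below (illustration only; the real per-arch versions in the next
patches also handle the KLP_STACK_OPTIMIZE and consistency cases):

  int arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
  				     klp_add_func_t add_func,
  				     struct klp_func_list **func_list)
  {
  	struct klp_func_list *pcheck = NULL;
  	struct klp_object *obj;
  	struct klp_func *func;
  	int ret;

  	klp_for_each_object(patch, obj) {
  		klp_for_each_func(obj, func) {
  			/* simplest policy: no task may sit in the old function */
  			ret = add_func(func_list, &pcheck,
  				       (unsigned long)func->old_func,
  				       func->old_size, func->old_name,
  				       func->force);
  			if (ret)
  				return ret;
  		}
  	}
  	return 0;
  }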
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm/include/asm/livepatch.h |   6 --
 arch/arm/kernel/livepatch.c      | 116 ++++++------------------------
 2 files changed, 21 insertions(+), 101 deletions(-)
diff --git a/arch/arm/include/asm/livepatch.h b/arch/arm/include/asm/livepatch.h
index 445a78d83d21..08ff5246f97d 100644
--- a/arch/arm/include/asm/livepatch.h
+++ b/arch/arm/include/asm/livepatch.h
@@ -34,11 +34,6 @@ struct klp_func;
 int arch_klp_patch_func(struct klp_func *func);
 void arch_klp_unpatch_func(struct klp_func *func);
 
-#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
-int klp_check_calltrace(struct klp_patch *patch, int enable);
-#endif
-
-
 #if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY)
 
 #ifdef CONFIG_ARM_MODULE_PLTS
@@ -63,7 +58,6 @@ int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func);
 void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func);
 long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func);
 int arch_klp_module_check_calltrace(void *data);
-
 #endif
 
 #endif /* _ASM_ARM_LIVEPATCH_H */
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c
index 1c27b7b15e3b..32f6813108f6 100644
--- a/arch/arm/kernel/livepatch.c
+++ b/arch/arm/kernel/livepatch.c
@@ -39,7 +39,6 @@
 #define ARM_INSN_SIZE 4
 #endif
 
-#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * ARM_INSN_SIZE)
 #define CHECK_JUMP_RANGE LJMP_INSN_SIZE
 
 #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
@@ -65,22 +64,11 @@ static bool is_jump_insn(u32 insn)
 }
 
 struct walk_stackframe_args {
-	int enable;
-	struct klp_func_list *check_funcs;
-	struct module *mod;
+	void *data;
 	int ret;
+	bool (*check_func)(void *data, int *ret, unsigned long pc);
 };
 
-static inline unsigned long klp_size_to_check(unsigned long func_size,
-		int force)
-{
-	unsigned long size = func_size;
-
-	if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
-		size = MAX_SIZE_TO_CHECK;
-	return size;
-}
-
 static bool check_jump_insn(unsigned long func_addr)
 {
 	unsigned long i;
@@ -95,32 +83,8 @@ static bool check_jump_insn(unsigned long func_addr)
 	return false;
 }
 
-static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
-		unsigned long func_addr, unsigned long func_size, const char *func_name,
-		int force)
-{
-	if (*func == NULL) {
-		*funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
-		if (!(*funcs))
-			return -ENOMEM;
-		*func = *funcs;
-	} else {
-		(*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
-				GFP_ATOMIC);
-		if (!(*func)->next)
-			return -ENOMEM;
-		*func = (*func)->next;
-	}
-	(*func)->func_addr = func_addr;
-	(*func)->func_size = func_size;
-	(*func)->func_name = func_name;
-	(*func)->force = force;
-	(*func)->next = NULL;
-	return 0;
-}
-
-static int klp_check_activeness_func(struct klp_patch *patch, int enable,
-		struct klp_func_list **check_funcs)
+int arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
+		klp_add_func_t add_func, struct klp_func_list **func_list)
 {
 	int ret;
 	struct klp_object *obj;
@@ -176,7 +140,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 			if (IS_ENABLED(CONFIG_PREEMPTION) ||
 			    (func->force == KLP_NORMAL_FORCE) ||
 			    check_jump_insn(func_addr)) {
-				ret = add_func_to_list(check_funcs, &pcheck,
+				ret = add_func(func_list, &pcheck,
 						func_addr, func_size,
 						func->old_name, func->force);
 				if (ret)
@@ -218,7 +182,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 					func_addr = (unsigned long)prev->new_func;
 					func_size = prev->new_size;
 				}
-				ret = add_func_to_list(check_funcs, &pcheck,
+				ret = add_func(func_list, &pcheck,
 						func_addr, func_size,
 						func->old_name, 0);
 				if (ret)
@@ -232,7 +196,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 #endif
 			func_addr = (unsigned long)func->new_func;
 			func_size = func->new_size;
-			ret = add_func_to_list(check_funcs, &pcheck,
+			ret = add_func(func_list, &pcheck,
 					func_addr, func_size,
 					func->old_name, 0);
 			if (ret)
@@ -243,36 +207,11 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 	return 0;
 }
 
-static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
+static int klp_check_jump_func(struct stackframe *frame, void *ws_args)
 {
-	while (funcs != NULL) {
-		*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
-				klp_size_to_check(funcs->func_size, funcs->force));
-		if (*ret) {
-			return true;
-		}
-		funcs = funcs->next;
-	}
-	return false;
-}
-
-static int klp_check_jump_func(struct stackframe *frame, void *data)
-{
-	struct walk_stackframe_args *args = data;
-	struct klp_func_list *check_funcs = args->check_funcs;
+	struct walk_stackframe_args *args = ws_args;
 
-	return check_func_list(check_funcs, &args->ret, frame->pc);
-}
-
-static void free_list(struct klp_func_list **funcs)
-{
-	struct klp_func_list *p;
-
-	while (*funcs != NULL) {
-		p = *funcs;
-		*funcs = (*funcs)->next;
-		kfree(p);
-	}
+	return !args->check_func(args->data, &args->ret, frame->pc);
 }
 
 static int do_check_calltrace(struct walk_stackframe_args *args,
@@ -305,37 +244,24 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 	return 0;
 }
 
-int klp_check_calltrace(struct klp_patch *patch, int enable)
+int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
 {
-	int ret = 0;
-	struct klp_func_list *check_funcs = NULL;
 	struct walk_stackframe_args args = {
-		.enable = enable,
-		.ret = 0
+		.data = data,
+		.ret = 0,
+		.check_func = check_func,
 	};
 
-	ret = klp_check_activeness_func(patch, enable, &check_funcs);
-	if (ret) {
-		pr_err("collect active functions failed, ret=%d\n", ret);
-		goto out;
-	}
-	if (!check_funcs)
-		goto out;
-
-	args.check_funcs = check_funcs;
-	ret = do_check_calltrace(&args, klp_check_jump_func);
-
-out:
-	free_list(&check_funcs);
-	return ret;
+	return do_check_calltrace(&args, klp_check_jump_func);
 }
 
-static int check_module_calltrace(struct stackframe *frame, void *data)
+static int check_module_calltrace(struct stackframe *frame, void *ws_args)
 {
-	struct walk_stackframe_args *args = data;
+	struct walk_stackframe_args *args = ws_args;
+	struct module *mod = args->data;
 
-	if (within_module_core(frame->pc, args->mod)) {
-		pr_err("module %s is in use!\n", args->mod->name);
+	if (within_module_core(frame->pc, mod)) {
+		pr_err("module %s is in use!\n", mod->name);
 		return (args->ret = -EBUSY);
 	}
 	return 0;
@@ -344,7 +270,7 @@ static int check_module_calltrace(struct stackframe *frame, void *data)
 int arch_klp_module_check_calltrace(void *data)
 {
 	struct walk_stackframe_args args = {
-		.mod = (struct module *)data,
+		.data = data,
 		.ret = 0
 	};
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm64/include/asm/livepatch.h |   4 -
 arch/arm64/kernel/livepatch.c      | 115 ++++++-----------------------
 2 files changed, 21 insertions(+), 98 deletions(-)
diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h
index c41a22adc944..2bacd12e46b1 100644
--- a/arch/arm64/include/asm/livepatch.h
+++ b/arch/arm64/include/asm/livepatch.h
@@ -41,9 +41,6 @@ static inline int klp_check_compiler_support(void)
 
 int arch_klp_patch_func(struct klp_func *func);
 void arch_klp_unpatch_func(struct klp_func *func);
-#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
-int klp_check_calltrace(struct klp_patch *patch, int enable);
-#endif
 #else
 #error Live patching support is disabled; check CONFIG_LIVEPATCH
 #endif
@@ -72,7 +69,6 @@ int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func);
 void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func);
 long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func);
 int arch_klp_module_check_calltrace(void *data);
-
 #endif
 
 #endif /* _ASM_ARM64_LIVEPATCH_H */
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index 237680e4ac5b..dbcc3f7c3347 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -35,7 +35,6 @@
 #include <linux/sched/debug.h>
 #include <linux/kallsyms.h>
 
-#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32))
 #define CHECK_JUMP_RANGE LJMP_INSN_SIZE
 
 static inline bool offset_in_range(unsigned long pc, unsigned long addr,
@@ -58,22 +57,11 @@ static inline bool offset_in_range(unsigned long pc, unsigned long addr,
 	((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800))
 
 struct walk_stackframe_args {
-	int enable;
-	struct klp_func_list *check_funcs;
-	struct module *mod;
+	void *data;
 	int ret;
+	bool (*check_func)(void *data, int *ret, unsigned long pc);
 };
 
-static inline unsigned long klp_size_to_check(unsigned long func_size,
-		int force)
-{
-	unsigned long size = func_size;
-
-	if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
-		size = MAX_SIZE_TO_CHECK;
-	return size;
-}
-
 static bool check_jump_insn(unsigned long func_addr)
 {
 	unsigned long i;
@@ -88,32 +76,8 @@ static bool check_jump_insn(unsigned long func_addr)
 	return false;
 }
 
-static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
-		unsigned long func_addr, unsigned long func_size, const char *func_name,
-		int force)
-{
-	if (*func == NULL) {
-		*funcs = (struct klp_func_list *)kzalloc(sizeof(**funcs), GFP_ATOMIC);
-		if (!(*funcs))
-			return -ENOMEM;
-		*func = *funcs;
-	} else {
-		(*func)->next = (struct klp_func_list *)kzalloc(sizeof(**funcs),
-				GFP_ATOMIC);
-		if (!(*func)->next)
-			return -ENOMEM;
-		*func = (*func)->next;
-	}
-	(*func)->func_addr = func_addr;
-	(*func)->func_size = func_size;
-	(*func)->func_name = func_name;
-	(*func)->force = force;
-	(*func)->next = NULL;
-	return 0;
-}
-
-static int klp_check_activeness_func(struct klp_patch *patch, int enable,
-		struct klp_func_list **check_funcs)
+int arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
+		klp_add_func_t add_func, struct klp_func_list **func_list)
 {
 	int ret;
 	struct klp_object *obj;
@@ -165,7 +129,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 			if (IS_ENABLED(CONFIG_PREEMPTION) ||
 			    (func->force == KLP_NORMAL_FORCE) ||
 			    check_jump_insn(func_addr)) {
-				ret = add_func_to_list(check_funcs, &pcheck,
+				ret = add_func(func_list, &pcheck,
 						func_addr, func_size,
 						func->old_name, func->force);
 				if (ret)
@@ -208,7 +172,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 					func_addr = (unsigned long)prev->new_func;
 					func_size = prev->new_size;
 				}
-				ret = add_func_to_list(check_funcs, &pcheck,
+				ret = add_func(func_list, &pcheck,
 						func_addr, func_size,
 						func->old_name, 0);
 				if (ret)
@@ -223,7 +187,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 			func_addr = (unsigned long)func->new_func;
 			func_size = func->new_size;
-			ret = add_func_to_list(check_funcs, &pcheck,
+			ret = add_func(func_list, &pcheck,
 					func_addr, func_size,
 					func->old_name, 0);
 			if (ret)
@@ -234,36 +198,11 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 	return 0;
 }
 
-static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
+static bool klp_check_jump_func(void *ws_args, unsigned long pc)
 {
-	while (funcs != NULL) {
-		*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
-				klp_size_to_check(funcs->func_size, funcs->force));
-		if (*ret) {
-			return false;
-		}
-		funcs = funcs->next;
-	}
-	return true;
-}
-
-static bool klp_check_jump_func(void *data, unsigned long pc)
-{
-	struct walk_stackframe_args *args = data;
-	struct klp_func_list *check_funcs = args->check_funcs;
-
-	return check_func_list(check_funcs, &args->ret, pc);
-}
-
-static void free_list(struct klp_func_list **funcs)
-{
-	struct klp_func_list *p;
+	struct walk_stackframe_args *args = ws_args;
 
-	while (*funcs != NULL) {
-		p = *funcs;
-		*funcs = (*funcs)->next;
-		kfree(p);
-	}
+	return args->check_func(args->data, &args->ret, pc);
 }
 
 static int do_check_calltrace(struct walk_stackframe_args *args,
@@ -301,36 +240,24 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 	return 0;
 }
 
-int klp_check_calltrace(struct klp_patch *patch, int enable)
+int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
 {
-	int ret = 0;
-	struct klp_func_list *check_funcs = NULL;
 	struct walk_stackframe_args args = {
-		.enable = enable,
-		.ret = 0
+		.data = data,
+		.ret = 0,
+		.check_func = check_func,
 	};
 
-	ret = klp_check_activeness_func(patch, enable, &check_funcs);
-	if (ret) {
-		pr_err("collect active functions failed, ret=%d\n", ret);
-		goto out;
-	}
-	if (!check_funcs)
-		goto out;
-
-	args.check_funcs = check_funcs;
-	ret = do_check_calltrace(&args, klp_check_jump_func);
-out:
-	free_list(&check_funcs);
-	return ret;
+	return do_check_calltrace(&args, klp_check_jump_func);
 }
 
-static bool check_module_calltrace(void *data, unsigned long pc)
+static bool check_module_calltrace(void *ws_args, unsigned long pc)
 {
-	struct walk_stackframe_args *args = data;
+	struct walk_stackframe_args *args = ws_args;
+	struct module *mod = args->data;
 
-	if (within_module_core(pc, args->mod)) {
-		pr_err("module %s is in use!\n", args->mod->name);
+	if (within_module_core(pc, mod)) {
+		pr_err("module %s is in use!\n", mod->name);
 		args->ret = -EBUSY;
 		return false;
 	}
@@ -340,7 +267,7 @@ static bool check_module_calltrace(void *data, unsigned long pc)
 int arch_klp_module_check_calltrace(void *data)
 {
 	struct walk_stackframe_args args = {
-		.mod = (struct module *)data,
+		.data = data,
 		.ret = 0
 	};
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/powerpc/include/asm/livepatch.h |   5 --
 arch/powerpc/kernel/livepatch_32.c   | 123 ++++++---------------------
 2 files changed, 25 insertions(+), 103 deletions(-)
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 285602e637f1..8ee1ceff8e41 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -150,11 +150,6 @@ int klp_unwind_frame(struct task_struct *tsk, struct stackframe *frame);
 
 #endif /* CONFIG_LIVEPATCH_FTRACE */
 
-#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
-struct klp_patch;
-int klp_check_calltrace(struct klp_patch *patch, int enable);
-#endif
-
 static inline void klp_init_thread_info(struct task_struct *p)
 {
 	/* + 1 to account for STACK_END_MAGIC */
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c
index 9ad9d92a4422..d047ab0ef67b 100644
--- a/arch/powerpc/kernel/livepatch_32.c
+++ b/arch/powerpc/kernel/livepatch_32.c
@@ -31,7 +31,6 @@
 
 #if defined (CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \
 	defined (CONFIG_LIVEPATCH_WO_FTRACE)
-#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32))
 #define CHECK_JUMP_RANGE LJMP_INSN_SIZE
 #endif
 
@@ -55,22 +54,11 @@ static bool is_jump_insn(u32 insn)
 }
 
 struct walk_stackframe_args {
-	int enable;
-	struct klp_func_list *check_funcs;
-	struct module *mod;
+	void *data;
 	int ret;
+	bool (*check_func)(void *data, int *ret, unsigned long pc);
 };
 
-static inline unsigned long klp_size_to_check(unsigned long func_size,
-		int force)
-{
-	unsigned long size = func_size;
-
-	if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
-		size = MAX_SIZE_TO_CHECK;
-	return size;
-}
-
 static bool check_jump_insn(unsigned long func_addr)
 {
 	unsigned long i;
@@ -85,32 +73,8 @@ static bool check_jump_insn(unsigned long func_addr)
 	return false;
 }
 
-static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
-		unsigned long func_addr, unsigned long func_size, const char *func_name,
-		int force)
-{
-	if (*func == NULL) {
-		*funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
-		if (!(*funcs))
-			return -ENOMEM;
-		*func = *funcs;
-	} else {
-		(*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
-				GFP_ATOMIC);
-		if (!(*func)->next)
-			return -ENOMEM;
-		*func = (*func)->next;
-	}
-	(*func)->func_addr = func_addr;
-	(*func)->func_size = func_size;
-	(*func)->func_name = func_name;
-	(*func)->force = force;
-	(*func)->next = NULL;
-	return 0;
-}
-
-static int klp_check_activeness_func(struct klp_patch *patch, int enable,
-		struct klp_func_list **check_funcs)
+int arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
+		klp_add_func_t add_func, struct klp_func_list **func_list)
 {
 	int ret;
 	struct klp_object *obj;
@@ -166,7 +130,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 			if (IS_ENABLED(CONFIG_PREEMPTION) ||
 			    (func->force == KLP_NORMAL_FORCE) ||
 			    check_jump_insn(func_addr)) {
-				ret = add_func_to_list(check_funcs, &pcheck,
+				ret = add_func(func_list, &pcheck,
 						func_addr, func_size,
 						func->old_name, func->force);
 				if (ret)
@@ -208,7 +172,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 					func_addr = (unsigned long)prev->new_func;
 					func_size = prev->new_size;
 				}
-				ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+				ret = add_func(func_list, &pcheck, func_addr,
 						func_size, func->old_name, 0);
 				if (ret)
 					return ret;
@@ -221,7 +185,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 #endif
 			func_addr = (unsigned long)func->new_func;
 			func_size = func->new_size;
-			ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+			ret = add_func(func_list, &pcheck, func_addr,
 					func_size, func->old_name, 0);
 			if (ret)
 				return ret;
@@ -246,29 +210,15 @@ void notrace klp_walk_stackframe(struct stackframe *frame,
 	}
 }
 
-static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
+static int klp_check_jump_func(struct stackframe *frame, void *ws_args)
 {
-	while (funcs != NULL) {
-		*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
-				klp_size_to_check(funcs->func_size, funcs->force));
-		if (*ret) {
-			return false;
-		}
-		funcs = funcs->next;
-	}
-	return true;
-}
-
-static int klp_check_jump_func(struct stackframe *frame, void *data)
-{
-	struct walk_stackframe_args *args = data;
-	struct klp_func_list *check_funcs = args->check_funcs;
+	struct walk_stackframe_args *args = ws_args;
 
 	/* check NIP when the exception stack switching */
-	if (frame->nip && !check_func_list(check_funcs, &args->ret, frame->nip))
+	if (frame->nip && !args->check_func(args->data, &args->ret, frame->nip))
 		return args->ret;
 	if (frame->link && !frame->nip_link_in_same_func &&
-	    !check_func_list(check_funcs, &args->ret, frame->link))
+	    !args->check_func(args->data, &args->ret, frame->link))
 		return args->ret;
 	/*
 	 * There are two cases that frame->pc is reliable:
@@ -276,24 +226,13 @@ static int klp_check_jump_func(struct stackframe *frame, void *data)
 	 *   2. nip and link are in same function;
 	 */
 	if (!frame->is_top_frame || frame->nip_link_in_same_func) {
-		if (!check_func_list(check_funcs, &args->ret, frame->pc))
+		if (!args->check_func(args->data, &args->ret, frame->pc))
 			return args->ret;
 	}
 
 	return 0;
 }
 
-static void free_list(struct klp_func_list **funcs)
-{
-	struct klp_func_list *p;
-
-	while (*funcs != NULL) {
-		p = *funcs;
-		*funcs = (*funcs)->next;
-		kfree(p);
-	}
-}
-
 static int do_check_calltrace(struct walk_stackframe_args *args,
 		int (*fn)(struct stackframe *, void *))
 {
@@ -340,56 +279,44 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 	return 0;
 }
 
-int klp_check_calltrace(struct klp_patch *patch, int enable)
+int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
 {
-	int ret = 0;
-	struct klp_func_list *check_funcs = NULL;
 	struct walk_stackframe_args args = {
-		.ret = 0
+		.data = data,
+		.ret = 0,
+		.check_func = check_func,
 	};
 
-	ret = klp_check_activeness_func(patch, enable, &check_funcs);
-	if (ret) {
-		pr_err("collect active functions failed, ret=%d\n", ret);
-		goto out;
-	}
-	if (!check_funcs)
-		goto out;
-
-	args.check_funcs = check_funcs;
-	ret = do_check_calltrace(&args, klp_check_jump_func);
-
-out:
-	free_list(&check_funcs);
-	return ret;
+	return do_check_calltrace(&args, klp_check_jump_func);
 }
 
-static int check_module_calltrace(struct stackframe *frame, void *data)
+static int check_module_calltrace(struct stackframe *frame, void *ws_args)
 {
-	struct walk_stackframe_args *args = data;
+	struct walk_stackframe_args *args = ws_args;
+	struct module *mod = args->data;
 
 	/* check NIP when the exception stack switching */
-	if (frame->nip && within_module_core(frame->nip, args->mod))
+	if (frame->nip && within_module_core(frame->nip, mod))
 		goto err_out;
 	if (frame->link && !frame->nip_link_in_same_func &&
-	    within_module_core(frame->link, args->mod))
+	    within_module_core(frame->link, mod))
 		goto err_out;
 	if (!frame->is_top_frame || frame->nip_link_in_same_func) {
-		if (within_module_core(frame->pc, args->mod))
+		if (within_module_core(frame->pc, mod))
 			goto err_out;
 	}
 
 	return 0;
 
 err_out:
-	pr_err("module %s is in use!\n", args->mod->name);
+	pr_err("module %s is in use!\n", mod->name);
 	return (args->ret = -EBUSY);
 }
 
 int arch_klp_module_check_calltrace(void *data)
 {
 	struct walk_stackframe_args args = {
-		.mod = (struct module *)data,
+		.data = data,
 		.ret = 0
 	};
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/powerpc/kernel/livepatch_64.c | 131 ++++++-----------------------
 kernel/livepatch/core.c            |   2 +-
 2 files changed, 29 insertions(+), 104 deletions(-)
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c
index bc9de72f71c3..3060c2b6f366 100644
--- a/arch/powerpc/kernel/livepatch_64.c
+++ b/arch/powerpc/kernel/livepatch_64.c
@@ -36,7 +36,6 @@
 
 #if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \
 	defined(CONFIG_LIVEPATCH_WO_FTRACE)
-#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32))
 #define CHECK_JUMP_RANGE LJMP_INSN_SIZE
 #endif
 
@@ -60,22 +59,11 @@ static bool is_jump_insn(u32 insn)
 }
 
 struct walk_stackframe_args {
-	int enable;
-	struct klp_func_list *check_funcs;
-	struct module *mod;
+	void *data;
 	int ret;
+	bool (*check_func)(void *data, int *ret, unsigned long pc);
 };
 
-static inline unsigned long klp_size_to_check(unsigned long func_size,
-		int force)
-{
-	unsigned long size = func_size;
-
-	if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
-		size = MAX_SIZE_TO_CHECK;
-	return size;
-}
-
 static bool check_jump_insn(unsigned long func_addr)
 {
 	unsigned long i;
@@ -90,32 +78,8 @@ static bool check_jump_insn(unsigned long func_addr)
 	return false;
 }
 
-static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
-		unsigned long func_addr, unsigned long func_size, const char *func_name,
-		int force)
-{
-	if (*func == NULL) {
-		*funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
-		if (!(*funcs))
-			return -ENOMEM;
-		*func = *funcs;
-	} else {
-		(*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
-				GFP_ATOMIC);
-		if (!(*func)->next)
-			return -ENOMEM;
-		*func = (*func)->next;
-	}
-	(*func)->func_addr = func_addr;
-	(*func)->func_size = func_size;
-	(*func)->func_name = func_name;
-	(*func)->force = force;
-	(*func)->next = NULL;
-	return 0;
-}
-
-static int klp_check_activeness_func(struct klp_patch *patch, int enable,
-		struct klp_func_list **check_funcs)
+int arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
+		klp_add_func_t add_func, struct klp_func_list **func_list)
 {
 	int ret;
 	struct klp_object *obj;
@@ -169,7 +133,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 			if (IS_ENABLED(CONFIG_PREEMPTION) ||
 			    (func->force == KLP_NORMAL_FORCE) ||
 			    check_jump_insn(func_addr)) {
-				ret = add_func_to_list(check_funcs, &pcheck,
+				ret = add_func(func_list, &pcheck,
 						func_addr, func_size,
 						func->old_name, func->force);
 				if (ret)
@@ -183,7 +147,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 				func_addr = ppc_function_entry(
 						(void *)func->new_func);
 				func_size = func->new_size;
-				ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+				ret = add_func(func_list, &pcheck, func_addr,
 						func_size, func->old_name, 0);
 				if (ret)
 					return ret;
@@ -209,7 +173,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 				 */
 				func_addr = (unsigned long)func->old_func;
 				func_size = func->old_size;
-				ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+				ret = add_func(func_list, &pcheck, func_addr,
 						func_size, "OLD_FUNC", 0);
 				if (ret)
 					return ret;
@@ -221,7 +185,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 
 			func_addr = (unsigned long)&func_node->arch_data.trampoline;
 			func_size = sizeof(struct ppc64_klp_btramp_entry);
-			ret = add_func_to_list(check_funcs, &pcheck, func_addr,
+			ret = add_func(func_list, &pcheck, func_addr,
 					func_size, "trampoline", 0);
 			if (ret)
 				return ret;
@@ -247,29 +211,15 @@ static void notrace klp_walk_stackframe(struct stackframe *frame,
 	}
 }
 
-static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
+static int klp_check_jump_func(struct stackframe *frame, void *ws_args)
 {
-	while (funcs != NULL) {
-		*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
-				klp_size_to_check(funcs->func_size, funcs->force));
-		if (*ret) {
-			return false;
-		}
-		funcs = funcs->next;
-	}
-	return true;
-}
-
-static int klp_check_jump_func(struct stackframe *frame, void *data)
-{
-	struct walk_stackframe_args *args = data;
-	struct klp_func_list *check_funcs = args->check_funcs;
+	struct walk_stackframe_args *args = ws_args;
 
 	/* check NIP when the exception stack switching */
-	if (frame->nip && !check_func_list(check_funcs, &args->ret, frame->nip))
+	if (frame->nip && !args->check_func(args->data, &args->ret, frame->nip))
 		return args->ret;
 	if (frame->link && !frame->nip_link_in_same_func &&
-	    !check_func_list(check_funcs, &args->ret, frame->link))
+	    !args->check_func(args->data, &args->ret, frame->link))
 		return args->ret;
 	/*
 	 * There are two cases that frame->pc is reliable:
@@ -277,24 +227,13 @@ static int klp_check_jump_func(struct stackframe *frame, void *data)
 	 *   2. nip and link are in same function;
 	 */
 	if (!frame->is_top_frame || frame->nip_link_in_same_func) {
-		if (!check_func_list(check_funcs, &args->ret, frame->pc))
+		if (!args->check_func(args->data, &args->ret, frame->pc))
 			return args->ret;
 	}
 
 	return 0;
 }
 
-static void free_list(struct klp_func_list **funcs)
-{
-	struct klp_func_list *p;
-
-	while (*funcs != NULL) {
-		p = *funcs;
-		*funcs = (*funcs)->next;
-		kfree(p);
-	}
-}
-
 static int do_check_calltrace(struct walk_stackframe_args *args,
 		int (*fn)(struct stackframe *, void *))
 {
@@ -335,8 +274,6 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 		frame.pc = stack[STACK_FRAME_LR_SAVE];
 		klp_walk_stackframe(&frame, fn, t, args);
 		if (args->ret) {
-			pr_debug("%s FAILED when %s\n", __func__,
-				 args->enable ? "enabling" : "disabling");
 			pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
 			show_stack(t, NULL, KERN_INFO);
 			return args->ret;
@@ -345,56 +282,44 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 	return 0;
 }
 
-int klp_check_calltrace(struct klp_patch *patch, int enable)
+int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
 {
-	int ret = 0;
-	struct klp_func_list *check_funcs = NULL;
-	struct walk_stackframe_args args;
-
-	ret = klp_check_activeness_func(patch, enable, &check_funcs);
-	if (ret) {
-		pr_err("collect active functions failed, ret=%d\n", ret);
-		goto out;
-	}
-	if (!check_funcs)
-		goto out;
-
-	args.check_funcs = check_funcs;
-	args.ret = 0;
-	args.enable = enable;
-	ret = do_check_calltrace(&args, klp_check_jump_func);
+	struct walk_stackframe_args args = {
+		.data = data,
+		.ret = 0,
+		.check_func = check_func,
+	};
 
-out:
-	free_list(&check_funcs);
-	return ret;
+	return do_check_calltrace(&args, klp_check_jump_func);
 }
 
-static int check_module_calltrace(struct stackframe *frame, void *data)
+static int check_module_calltrace(struct stackframe *frame, void *ws_args)
 {
-	struct walk_stackframe_args *args = data;
+	struct walk_stackframe_args *args = ws_args;
+	struct module *mod = args->data;
 
 	/* check NIP when the exception stack switching */
-	if (frame->nip && within_module_core(frame->nip, args->mod))
+	if (frame->nip && within_module_core(frame->nip, mod))
 		goto err_out;
 	if (frame->link && !frame->nip_link_in_same_func &&
-	    within_module_core(frame->link, args->mod))
+	    within_module_core(frame->link, mod))
 		goto err_out;
 	if (!frame->is_top_frame || frame->nip_link_in_same_func) {
-		if (within_module_core(frame->pc, args->mod))
+		if (within_module_core(frame->pc, mod))
 			goto err_out;
 	}
 
 	return 0;
 
 err_out:
-	pr_err("module %s is in use!\n", args->mod->name);
+	pr_err("module %s is in use!\n", mod->name);
 	return (args->ret = -EBUSY);
 }
 
 int arch_klp_module_check_calltrace(void *data)
 {
 	struct walk_stackframe_args args = {
-		.mod = (struct module *)data,
+		.data = data,
 		.ret = 0
 	};
 
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 050c75d59f21..345b57b67fbf 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1409,7 +1409,7 @@ static void free_func_list(struct klp_func_list **funcs)
 	}
 }
 
-int __weak klp_check_calltrace(struct klp_patch *patch, int enable)
+static int klp_check_calltrace(struct klp_patch *patch, int enable)
 {
 	int ret = 0;
 	struct klp_func_list *func_list = NULL;
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/x86/kernel/livepatch.c | 80 +++++++++++++++++++++++--------------
 1 file changed, 51 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index 1fc4ac526943..3e992bc9a92a 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
@@ -252,9 +252,10 @@ static int klp_check_stack(void *trace_ptr, int trace_len,
 	return 0;
 }
 
-static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data)
+static int check_task_calltrace(struct task_struct *t,
+				bool (*fn)(void *, int *, unsigned long),
+				void *data)
 {
-	struct task_struct *g, *t;
 	int ret = 0;
 	static unsigned long trace_entries[MAX_STACK_ENTRIES];
 #ifdef CONFIG_ARCH_STACKWALK
@@ -263,38 +264,48 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data)
 	struct stack_trace trace;
 #endif
 
+#ifdef CONFIG_ARCH_STACKWALK
+	ret = stack_trace_save_tsk_reliable(t, trace_entries, MAX_STACK_ENTRIES);
+	if (ret < 0) {
+		pr_err("%s:%d has an unreliable stack, ret=%d\n",
+		       t->comm, t->pid, ret);
+		return ret;
+	}
+	trace_len = ret;
+	ret = klp_check_stack(trace_entries, trace_len, fn, data);
+#else
+	trace.skip = 0;
+	trace.nr_entries = 0;
+	trace.max_entries = MAX_STACK_ENTRIES;
+	trace.entries = trace_entries;
+	ret = save_stack_trace_tsk_reliable(t, &trace);
+	if (ret) {
+		pr_err("%s: %s:%d has an unreliable stack, ret=%d\n",
+		       __func__, t->comm, t->pid, ret);
+		return ret;
+	}
+	ret = klp_check_stack(&trace, 0, fn, data);
+#endif
+	if (ret) {
+		pr_err("%s:%d check stack failed, ret=%d\n",
+		       t->comm, t->pid, ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data)
+{
+	int ret = 0;
+	struct task_struct *g, *t;
+
 	for_each_process_thread(g, t) {
 		if (klp_is_migration_thread(t->comm))
 			continue;
 
-#ifdef CONFIG_ARCH_STACKWALK
-		ret = stack_trace_save_tsk_reliable(t, trace_entries, MAX_STACK_ENTRIES);
-		if (ret < 0) {
-			pr_err("%s:%d has an unreliable stack, ret=%d\n",
-			       t->comm, t->pid, ret);
-			return ret;
-		}
-		trace_len = ret;
-		ret = klp_check_stack(trace_entries, trace_len, fn, data);
-#else
-		trace.skip = 0;
-		trace.nr_entries = 0;
-		trace.max_entries = MAX_STACK_ENTRIES;
-		trace.entries = trace_entries;
-		ret = save_stack_trace_tsk_reliable(t, &trace);
-		WARN_ON_ONCE(ret == -ENOSYS);
-		if (ret) {
-			pr_err("%s: %s:%d has an unreliable stack, ret=%d\n",
-			       __func__, t->comm, t->pid, ret);
+		ret = check_task_calltrace(t, fn, data);
+		if (ret)
 			return ret;
-		}
-		ret = klp_check_stack(&trace, 0, fn, data);
-#endif
-		if (ret) {
-			pr_err("%s:%d check stack failed, ret=%d\n",
-			       t->comm, t->pid, ret);
-			return ret;
-		}
 	}
 
 	return 0;
@@ -312,6 +323,17 @@ static bool check_module_calltrace(void *data, int *ret, unsigned long pc)
 	return true;
 }
 
+#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE
+int arch_klp_check_task_calltrace(struct task_struct *t,
+				  bool (*fn)(void *, int *, unsigned long),
+				  void *data)
+{
+	if (t == NULL)
+		return -EINVAL;
+	return check_task_calltrace(t, fn, data);
+}
+#endif
+
 int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
 {
 	return do_check_calltrace(check_func, data);
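
A hypothetical caller sketch for the new hook (the actual caller lands with
the CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE core changes later in this
series; add_func_to_list/check_func_list/free_func_list are the core.c
helpers introduced earlier, and klp_check_one_task is an invented name used
purely for illustration):

  static int klp_check_one_task(struct klp_patch *patch, struct task_struct *t)
  {
  	struct klp_func_list *func_list = NULL;
  	int ret;

  	/* collect the address ranges that must not be live on t's stack */
  	ret = arch_klp_check_activeness_func(patch, 1, add_func_to_list,
  					     &func_list);
  	if (!ret && func_list)
  		/* verify only this task instead of stopping the machine */
  		ret = arch_klp_check_task_calltrace(t, check_func_list,
  						    func_list);
  	free_func_list(&func_list);
  	return ret;
  }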
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm/kernel/livepatch.c | 67 ++++++++++++++++++++++++++-----------
 1 file changed, 48 insertions(+), 19 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c
index 32f6813108f6..af7438d19a46 100644
--- a/arch/arm/kernel/livepatch.c
+++ b/arch/arm/kernel/livepatch.c
@@ -214,36 +214,65 @@ static int klp_check_jump_func(struct stackframe *frame, void *ws_args)
 	return !args->check_func(args->data, &args->ret, frame->pc);
 }
 
+static int check_task_calltrace(struct task_struct *t,
+				struct walk_stackframe_args *args,
+				int (*fn)(struct stackframe *, void *))
+{
+	struct stackframe frame;
+
+	if (t == current) {
+		frame.fp = (unsigned long)__builtin_frame_address(0);
+		frame.sp = current_stack_pointer;
+		frame.lr = (unsigned long)__builtin_return_address(0);
+		frame.pc = (unsigned long)check_task_calltrace;
+	} else {
+		frame.fp = thread_saved_fp(t);
+		frame.sp = thread_saved_sp(t);
+		frame.lr = 0;		/* recovered from the stack */
+		frame.pc = thread_saved_pc(t);
+	}
+	walk_stackframe(&frame, fn, args);
+	if (args->ret) {
+		pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
+		show_stack(t, NULL, KERN_INFO);
+		return args->ret;
+	}
+	return 0;
+}
+
 static int do_check_calltrace(struct walk_stackframe_args *args,
 		int (*fn)(struct stackframe *, void *))
 {
+	int ret;
 	struct task_struct *g, *t;
-	struct stackframe frame;
 
 	for_each_process_thread(g, t) {
-		if (t == current) {
-			frame.fp = (unsigned long)__builtin_frame_address(0);
-			frame.sp = current_stack_pointer;
-			frame.lr = (unsigned long)__builtin_return_address(0);
-			frame.pc = (unsigned long)do_check_calltrace;
-		} else if (klp_is_migration_thread(t->comm)) {
+		if (klp_is_migration_thread(t->comm))
 			continue;
-		} else {
-			frame.fp = thread_saved_fp(t);
-			frame.sp = thread_saved_sp(t);
-			frame.lr = 0;		/* recovered from the stack */
-			frame.pc = thread_saved_pc(t);
-		}
-		walk_stackframe(&frame, fn, args);
-		if (args->ret) {
-			pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
-			show_stack(t, NULL, KERN_INFO);
-			return args->ret;
-		}
+		ret = check_task_calltrace(t, args, fn);
+		if (ret)
+			return ret;
 	}
 	return 0;
 }
 
+#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE
+int arch_klp_check_task_calltrace(struct task_struct *t,
+				  bool (*check_func)(void *, int *, unsigned long),
+				  void *data)
+{
+	struct walk_stackframe_args args = {
+		.data = data,
+		.ret = 0,
+		.check_func = check_func,
+	};
+
+	if (t == NULL)
+		return -EINVAL;
+	return check_task_calltrace(t, &args, klp_check_jump_func);
+}
+#endif
+
 int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
 {
 	struct walk_stackframe_args args = {
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm64/kernel/livepatch.c | 77 ++++++++++++++++++++++++-----------
 1 file changed, 53 insertions(+), 24 deletions(-)
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index dbcc3f7c3347..90ad14d7c1fc 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -205,41 +205,70 @@ static bool klp_check_jump_func(void *ws_args, unsigned long pc)
 	return args->check_func(args->data, &args->ret, pc);
 }
 
+static int check_task_calltrace(struct task_struct *t,
+				struct walk_stackframe_args *args,
+				bool (*fn)(void *, unsigned long))
+{
+	struct stackframe frame;
+
+	/*
+	 * Handle the current carefully on each CPUs, we shouldn't
+	 * use saved FP and PC when backtrace current. It's difficult
+	 * to backtrack other CPU currents here. But fortunately,
+	 * all CPUs will stay in this function, so the current's
+	 * backtrace is so similar
+	 */
+	if (t == current) {
+		/* current on this CPU */
+		frame.fp = (unsigned long)__builtin_frame_address(0);
+		frame.pc = (unsigned long)check_task_calltrace;
+	} else {
+		frame.fp = thread_saved_fp(t);
+		frame.pc = thread_saved_pc(t);
+	}
+	start_backtrace(&frame, frame.fp, frame.pc);
+	walk_stackframe(t, &frame, fn, args);
+	if (args->ret) {
+		pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
+		show_stack(t, NULL, KERN_INFO);
+		return args->ret;
+	}
+	return 0;
+}
+
 static int do_check_calltrace(struct walk_stackframe_args *args,
 		bool (*fn)(void *, unsigned long))
 {
+	int ret;
 	struct task_struct *g, *t;
-	struct stackframe frame;
 
 	for_each_process_thread(g, t) {
-		/*
-		 * Handle the current carefully on each CPUs, we shouldn't
-		 * use saved FP and PC when backtrace current. It's difficult
-		 * to backtrack other CPU currents here. But fortunately,
-		 * all CPUs will stay in this function, so the current's
-		 * backtrace is so similar
-		 */
-		if (t == current) {
-			/* current on this CPU */
-			frame.fp = (unsigned long)__builtin_frame_address(0);
-			frame.pc = (unsigned long)do_check_calltrace;
-		} else if (klp_is_migration_thread(t->comm)) {
+		if (klp_is_migration_thread(t->comm))
 			continue;
-		} else {
-			frame.fp = thread_saved_fp(t);
-			frame.pc = thread_saved_pc(t);
-		}
-		start_backtrace(&frame, frame.fp, frame.pc);
-		walk_stackframe(t, &frame, fn, args);
-		if (args->ret) {
-			pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
-			show_stack(t, NULL, KERN_INFO);
-			return args->ret;
-		}
+		ret = check_task_calltrace(t, args, fn);
+		if (ret)
+			return ret;
 	}
 	return 0;
 }
 
+#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE
+int arch_klp_check_task_calltrace(struct task_struct *t,
+				  bool (*check_func)(void *, int *, unsigned long),
+				  void *data)
+{
+	struct walk_stackframe_args args = {
+		.data = data,
+		.ret = 0,
+		.check_func = check_func,
+	};
+
+	if (t == NULL)
+		return -EINVAL;
+	return check_task_calltrace(t, &args, klp_check_jump_func);
+}
+#endif
+
 int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
 {
 	struct walk_stackframe_args args = {
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/powerpc/kernel/livepatch_32.c | 98 +++++++++++++++++++-----------
 1 file changed, 63 insertions(+), 35 deletions(-)
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c
index d047ab0ef67b..ecd184098750 100644
--- a/arch/powerpc/kernel/livepatch_32.c
+++ b/arch/powerpc/kernel/livepatch_32.c
@@ -233,52 +233,80 @@ static int klp_check_jump_func(struct stackframe *frame, void *ws_args)
 	return 0;
 }
 
+static int check_task_calltrace(struct task_struct *t,
+				struct walk_stackframe_args *args,
+				int (*fn)(struct stackframe *, void *))
+{
+	struct stackframe frame = { 0 };
+	unsigned long *stack;
+
+	if (t == current) {
+		/*
+		 * Handle the current carefully on each CPUs, we shouldn't
+		 * use saved FP and PC when backtrace current. It's difficult
+		 * to backtrack other CPU currents here. But fortunately,
+		 * all CPUs will stay in this function, so the current's
+		 * backtrace is so similar
+		 */
+		stack = (unsigned long *)current_stack_pointer;
+	} else {
+		/*
+		 * Skip the first frame since it does not contain lr
+		 * at normal position and nip is stored in the lr
+		 * position in the second frame.
+		 * See arch/powerpc/kernel/entry_32.S _switch .
+		 */
+		unsigned long s = *(unsigned long *)t->thread.ksp;
+
+		if (!validate_sp(s, t, STACK_FRAME_OVERHEAD))
+			return 0;
+		stack = (unsigned long *)s;
+	}
+
+	frame.sp = (unsigned long)stack;
+	frame.pc = stack[STACK_FRAME_LR_SAVE];
+	klp_walk_stackframe(&frame, fn, t, args);
+	if (args->ret) {
+		pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
+		show_stack(t, NULL, KERN_INFO);
+		return args->ret;
+	}
+	return 0;
+}
+
 static int do_check_calltrace(struct walk_stackframe_args *args,
 		int (*fn)(struct stackframe *, void *))
 {
+	int ret;
 	struct task_struct *g, *t;
-	unsigned long *stack;
 
 	for_each_process_thread(g, t) {
-		struct stackframe frame = { 0 };
-
-		if (t == current) {
-			/*
-			 * Handle the current carefully on each CPUs, we shouldn't
-			 * use saved FP and PC when backtrace current. It's difficult
-			 * to backtrack other CPU currents here. But fortunately,
-			 * all CPUs will stay in this function, so the current's
-			 * backtrace is so similar
-			 */
-			stack = (unsigned long *)current_stack_pointer;
-		} else if (klp_is_migration_thread(t->comm)) {
+		if (klp_is_migration_thread(t->comm))
 			continue;
-		} else {
-			/*
-			 * Skip the first frame since it does not contain lr
-			 * at normal position and nip is stored in the lr
-			 * position in the second frame.
-			 * See arch/powerpc/kernel/entry_32.S _switch .
-			 */
-			unsigned long s = *(unsigned long *)t->thread.ksp;
-
-			if (!validate_sp(s, t, STACK_FRAME_OVERHEAD))
-				continue;
-			stack = (unsigned long *)s;
-		}
-
-		frame.sp = (unsigned long)stack;
-		frame.pc = stack[STACK_FRAME_LR_SAVE];
-		klp_walk_stackframe(&frame, fn, t, args);
-		if (args->ret) {
-			pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
-			show_stack(t, NULL, KERN_INFO);
-			return args->ret;
-		}
+		ret = check_task_calltrace(t, args, fn);
+		if (ret)
+			return ret;
 	}
 	return 0;
 }
 
+#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE
+int arch_klp_check_task_calltrace(struct task_struct *t,
+				  bool (*check_func)(void *, int *, unsigned long),
+				  void *data)
+{
+	struct walk_stackframe_args args = {
+		.data = data,
+		.ret = 0,
+		.check_func = check_func,
+	};
+
+	if (t == NULL)
+		return -EINVAL;
+	return check_task_calltrace(t, &args, klp_check_jump_func);
+}
+#endif
+
 int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
 {
 	struct walk_stackframe_args args = {
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- arch/powerpc/kernel/livepatch_64.c | 102 ++++++++++++++++++----------- 1 file changed, 65 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 3060c2b6f366..a78f5ab12d60 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -234,54 +234,82 @@ static int klp_check_jump_func(struct stackframe *frame, void *ws_args) return 0; }
+static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) +{ + struct stackframe frame = { 0 }; + unsigned long *stack; + + if (t == current) { + /* + * Handle 'current' carefully on each CPU: + * we should not use the saved FP and PC when + * backtracing 'current'. It is difficult to + * backtrace 'current' on other CPUs here, but + * fortunately all CPUs will stay in this + * function, so their backtraces are + * similar. + */ + stack = (unsigned long *)current_stack_pointer; + } else { + /* + * Skip the first frame since it does not contain lr + * at normal position and nip is stored in the lr + * position in the second frame. + * See arch/powerpc/kernel/entry_64.S _switch . + */ + unsigned long s = *(unsigned long *)t->thread.ksp; + + if (!validate_sp(s, t, STACK_FRAME_OVERHEAD)) + return 0; + stack = (unsigned long *)s; + } + + frame.sp = (unsigned long)stack; + frame.pc = stack[STACK_FRAME_LR_SAVE]; + klp_walk_stackframe(&frame, fn, t, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; +} + static int do_check_calltrace(struct walk_stackframe_args *args, int (*fn)(struct stackframe *, void *)) { + int ret; struct task_struct *g, *t; - unsigned long *stack;
for_each_process_thread(g, t) { - struct stackframe frame = { 0 }; - - if (t == current) { - /* - * Handle the current carefully on each CPUs, - * we shouldn't use saved FP and PC when - * backtrace current. It's difficult to - * backtrack other CPU currents here. But - * fortunately,all CPUs will stay in this - * function, so the current's backtrace is - * so similar - */ - stack = (unsigned long *)current_stack_pointer; - } else if (klp_is_migration_thread(t->comm)) { + if (klp_is_migration_thread(t->comm)) continue; - } else { - /* - * Skip the first frame since it does not contain lr - * at notmal position and nip is store ind the lr - * position in the second frame. - * See arch/powerpc/kernel/entry_64.S _switch . - */ - unsigned long s = *(unsigned long *)t->thread.ksp; - - if (!validate_sp(s, t, STACK_FRAME_OVERHEAD)) - continue; - stack = (unsigned long *)s; - } - - frame.sp = (unsigned long)stack; - frame.pc = stack[STACK_FRAME_LR_SAVE]; - klp_walk_stackframe(&frame, fn, t, args); - if (args->ret) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - return args->ret; - } + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; } return 0; }
+#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*check_func)(void *, int *, unsigned long), + void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; + + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, &args, klp_check_jump_func); +} +#endif + int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) { struct walk_stackframe_args args = {
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- arch/x86/kernel/livepatch.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 3e992bc9a92a..b6803cd7e0af 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -445,6 +445,21 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) return ret; }
+static void klp_patch_text(void *dst, const void *src, int len) +{ + if (len <= 1) + return; + /* skip the leading breakpoint */ + text_poke(dst + 1, src + 1, len - 1); + /* + * Avoid compiler optimization; make sure that all instructions + * except the first breakpoint have been patched. + */ + barrier(); + /* update jmp opcode */ + text_poke(dst, src, 1); +} + int arch_klp_patch_func(struct klp_func *func) { struct klp_func_node *func_node; @@ -457,15 +472,7 @@ int arch_klp_patch_func(struct klp_func *func) new_addr = (unsigned long)func->new_func; /* replace the text with the new text */ new = (unsigned char *)klp_jmp_code(ip, new_addr); -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY - /* update jmp offset */ - text_poke((void *)(ip + 1), new + 1, JMP_E9_INSN_SIZE - 1); - /* update jmp opcode */ - text_poke((void *)ip, new, 1); -#else - text_poke((void *)ip, new, JMP_E9_INSN_SIZE); -#endif - + klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE); return 0; }
@@ -490,6 +497,6 @@ void arch_klp_unpatch_func(struct klp_func *func) }
/* replace the text with the new text */ - text_poke((void *)ip, new, JMP_E9_INSN_SIZE); + klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE); } #endif
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- arch/arm/kernel/livepatch.c | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index af7438d19a46..a8e9088ca778 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -383,14 +383,29 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) return ret; }
+static void klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + + if (len <= 0) + return; + /* skip the leading breakpoint */ + for (i = 1; i < len; i++) + __patch_text(dst + i, src[i]); + /* + * Avoid compiler optimization; make sure that all instructions + * except the first breakpoint have been patched. + */ + barrier(); + __patch_text(dst, src[0]); +} + static int do_patch(unsigned long pc, unsigned long new_addr) { u32 insns[LJMP_INSN_SIZE];
if (!offset_in_range(pc, new_addr, SZ_32M)) { #ifdef CONFIG_ARM_MODULE_PLTS - int i; - /* * [0] LDR PC, [PC+8] * [4] nop @@ -400,8 +415,7 @@ static int do_patch(unsigned long pc, unsigned long new_addr) insns[1] = __opcode_to_mem_arm(0xe320f000); insns[2] = new_addr;
- for (i = 0; i < LJMP_INSN_SIZE; i++) - __patch_text(((u32 *)pc) + i, insns[i]); + klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); #else /* * When offset from 'new_addr' to 'pc' is out of SZ_32M range but @@ -412,7 +426,7 @@ static int do_patch(unsigned long pc, unsigned long new_addr) #endif } else { insns[0] = arm_gen_branch(pc, new_addr); - __patch_text((void *)pc, insns[0]); + klp_patch_text((u32 *)pc, insns, 1); } return 0; } @@ -440,11 +454,7 @@ void arch_klp_unpatch_func(struct klp_func *func) pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - int i; - - for (i = 0; i < LJMP_INSN_SIZE; i++) { - __patch_text(((u32 *)pc) + i, func_node->arch_data.old_insns[i]); - } + klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE); } else { next_func = list_first_or_null_rcu(&func_node->func_stack, struct klp_func, stack_node);
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- arch/arm64/kernel/livepatch.c | 47 ++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 17 deletions(-)
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 90ad14d7c1fc..a303fd4a69c9 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -358,6 +358,27 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) return ret; }
+static int klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + int ret; + + if (len <= 0) + return -EINVAL; + /* skip the leading breakpoint */ + for (i = 1; i < len; i++) { + ret = aarch64_insn_patch_text_nosync(dst + i, src[i]); + if (ret) + return ret; + } + /* + * Avoid compiler optimization; make sure that all instructions + * except the first breakpoint have been patched. + */ + barrier(); + return aarch64_insn_patch_text_nosync(dst, src[0]); +} + static int do_patch(unsigned long pc, unsigned long new_addr) { u32 insns[LJMP_INSN_SIZE]; @@ -366,26 +387,22 @@ static int do_patch(unsigned long pc, unsigned long new_addr) if (offset_in_range(pc, new_addr, SZ_128M)) { insns[0] = aarch64_insn_gen_branch_imm(pc, new_addr, AARCH64_INSN_BRANCH_NOLINK); - ret = aarch64_insn_patch_text_nosync((void *)pc, insns[0]); + ret = klp_patch_text((u32 *)pc, insns, 1); if (ret) { pr_err("patch instruction small range failed, ret=%d\n", ret); return -EPERM; } } else { #ifdef CONFIG_ARM64_MODULE_PLTS - int i;
insns[0] = 0x92800010 | (((~new_addr) & 0xffff)) << 5; insns[1] = 0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5; insns[2] = 0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5; insns[3] = 0xd61f0200; - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i]); - if (ret) { - pr_err("patch instruction %d large range failed, ret=%d\n", - i, ret); - return -EPERM; - } + ret = klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("patch instruction large range failed, ret=%d\n", ret); + return -EPERM; } #else /* @@ -417,20 +434,16 @@ void arch_klp_unpatch_func(struct klp_func *func) struct klp_func_node *func_node; struct klp_func *next_func; unsigned long pc; - int i; int ret;
func_node = func->func_node; pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, - func_node->arch_data.old_insns[i]); - if (ret) { - pr_err("restore instruction %d failed, ret=%d\n", i, ret); - return; - } + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; } } else { next_func = list_first_or_null_rcu(&func_node->func_stack,
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- arch/powerpc/include/asm/livepatch.h | 1 + arch/powerpc/kernel/livepatch.c | 22 ++++++++++++++++++++++ arch/powerpc/kernel/livepatch_32.c | 26 +++++++++----------------- 3 files changed, 32 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h index 8ee1ceff8e41..243c8f93ff6e 100644 --- a/arch/powerpc/include/asm/livepatch.h +++ b/arch/powerpc/include/asm/livepatch.h @@ -147,6 +147,7 @@ void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); int arch_klp_module_check_calltrace(void *data); int klp_unwind_frame(struct task_struct *tsk, struct stackframe *frame); +int klp_patch_text(u32 *dst, const u32 *src, int len);
#endif /* CONFIG_LIVEPATCH_FTRACE */
diff --git a/arch/powerpc/kernel/livepatch.c b/arch/powerpc/kernel/livepatch.c index 5ba38c2c7c5c..a522f1ce2494 100644 --- a/arch/powerpc/kernel/livepatch.c +++ b/arch/powerpc/kernel/livepatch.c @@ -136,3 +136,25 @@ int klp_unwind_frame(struct task_struct *tsk, struct stackframe *frame)
return 0; } + +int klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + int ret; + + if (len <= 0) + return -EINVAL; + /* skip the leading breakpoint */ + for (i = 1; i < len; i++) { + ret = patch_instruction((struct ppc_inst *)(dst + i), + ppc_inst(src[i])); + if (ret) + return ret; + } + /* + * Avoid compiler optimization; make sure that all instructions + * except the first breakpoint have been patched. + */ + barrier(); + return patch_instruction((struct ppc_inst *)dst, ppc_inst(src[0])); +} diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index ecd184098750..a04b974809b3 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -379,7 +379,6 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) static int do_patch(unsigned long pc, unsigned long new_addr) { int ret; - int i; u32 insns[LJMP_INSN_SIZE];
if (offset_in_range(pc, new_addr, SZ_32M)) { @@ -403,14 +402,10 @@ static int do_patch(unsigned long pc, unsigned long new_addr) insns[2] = 0x7d8903a6; insns[3] = 0x4e800420;
- for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i), - ppc_inst(insns[i])); - if (ret) { - pr_err("patch instruction %d large range failed, ret=%d\n", - i, ret); - return -EPERM; - } + ret = klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("patch instruction large range failed, ret=%d\n", ret); + return -EPERM; } } return 0; @@ -434,20 +429,17 @@ void arch_klp_unpatch_func(struct klp_func *func) struct klp_func_node *func_node; struct klp_func *next_func; unsigned long pc; - int i; int ret;
func_node = func->func_node; pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i), - ppc_inst(func_node->arch_data.old_insns[i])); - if (ret) { - pr_err("restore instruction %d failed, ret=%d\n", i, ret); - return; - } + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, + LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; } } else { next_func = list_first_or_null_rcu(&func_node->func_stack,
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- arch/powerpc/kernel/livepatch_64.c | 13 +++++-------- arch/powerpc/kernel/module_64.c | 29 ++++++++++++++++++++--------- 2 files changed, 25 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index a78f5ab12d60..55821abfff14 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -412,20 +412,17 @@ void arch_klp_unpatch_func(struct klp_func *func) struct klp_func_node *func_node; struct klp_func *next_func; unsigned long pc; - int i; int ret;
func_node = func->func_node; pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = patch_instruction((struct ppc_inst *)((u32 *)pc + i), - ppc_inst(func_node->arch_data.old_insns[i])); - if (ret) { - pr_err("restore instruction %d failed, ret=%d\n", i, ret); - break; - } + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, + LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; }
pr_debug("[%s %d] restore insns at 0x%lx\n", __func__, __LINE__, pc); diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index ef093691f606..d0e4581b0cf0 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -817,17 +817,18 @@ int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs) * Patch jump stub to reference trampoline * without saved the old R2 and load the new R2. */ -static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry, +static int livepatch_create_bstub(void *pc, unsigned long addr, struct module *me) { long reladdr; unsigned long my_r2; unsigned long stub_start, stub_end, stub_size; + struct ppc64_klp_bstub_entry entry;
/* Stub uses address relative to r2. */ my_r2 = me ? me->arch.toc : kernel_toc_addr(); - reladdr = (unsigned long)entry - my_r2; + reladdr = (unsigned long)pc - my_r2; if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { pr_err("%s: Address %p of jump stub out of range of %p.\n", me ? me->name : "kernel", @@ -839,15 +840,25 @@ static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry, stub_start = ppc_function_entry((void *)livepatch_branch_stub); stub_end = ppc_function_entry((void *)livepatch_branch_stub_end); stub_size = stub_end - stub_start; - memcpy(entry->jump, (u32 *)stub_start, stub_size); + memcpy(entry.jump, (u32 *)stub_start, stub_size); + + entry.jump[0] |= PPC_HA(reladdr); + entry.jump[1] |= PPC_LO(reladdr); + entry.magic = BRANCH_STUB_MAGIC; + entry.trampoline = addr;
- entry->jump[0] |= PPC_HA(reladdr); - entry->jump[1] |= PPC_LO(reladdr); - entry->magic = BRANCH_STUB_MAGIC; - entry->trampoline = addr;
+ /* skip the leading breakpoint */ + memcpy(pc + PPC64_INSN_SIZE, (void *)&entry + PPC64_INSN_SIZE, + sizeof(entry) - PPC64_INSN_SIZE); + /* + * Avoid compiler optimization; make sure that all instructions + * except the first breakpoint have been patched. + */ + barrier(); + memcpy(pc, (void *)&entry, PPC64_INSN_SIZE); pr_debug("Create livepatch branch stub 0x%px with reladdr 0x%lx r2 0x%lx to trampoline 0x%lx\n", - (void *)entry, reladdr, my_r2, addr); + pc, reladdr, my_r2, addr);
return 1; } @@ -898,7 +909,7 @@ int livepatch_create_branch(unsigned long pc, #endif
/* Create stub to trampoline */ - if (!livepatch_create_bstub((struct ppc64_klp_bstub_entry *)pc, trampoline, me)) + if (!livepatch_create_bstub((void *)pc, trampoline, me)) return -EINVAL;
return 0;
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
After a 'brk' instruction has been placed at the entry of an old function, every call of that function traps into the exception handler and is redirected to the new function. So we can check the calltrace of every task, without using stop_machine, to make sure that no task is still running in the old function, and then directly replace the old function with instructions that jump to the new function.
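In other words, once the breakpoints are armed by klp_add_breakpoint(), enabling reduces to a bounded retry loop of batched calltrace checks. Below is a minimal sketch abridged from klp_breakpoint_enable_patch() in the diff that follows; the wrapper name is made up here, and error handling, task patch-state setup/reset and the code-modify prepare/post hooks are omitted:

static int breakpoint_enable_abridged(struct klp_patch *patch,
				      struct klp_func_list *func_list)
{
	int i;

	for (i = 0; i < KLP_RETRY_COUNT; i++) {
		/* Batch-check every task's calltrace against the old funcs. */
		update_tasks_patch_state(func_list);
		/* No task left running or sleeping in an old function? */
		if (is_patchable())
			return enable_patch(patch, true);
		/* Let stragglers move on, then retry. */
		msleep(KLP_RETRY_INTERVAL);
	}
	return -EAGAIN;
}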
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- include/linux/livepatch.h | 4 + kernel/livepatch/Kconfig | 11 +++ kernel/livepatch/core.c | 202 ++++++++++++++++++++++++++++++++++++-- 3 files changed, 208 insertions(+), 9 deletions(-)
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index b4cf90c03d29..7146989b5fbc 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -321,7 +321,11 @@ static inline int klp_module_coming(struct module *mod) { return 0; } static inline void klp_module_going(struct module *mod) {} static inline bool klp_patch_pending(struct task_struct *task) { return false; } static inline void klp_update_patch_state(struct task_struct *task) {} +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +void klp_copy_process(struct task_struct *child); +#else static inline void klp_copy_process(struct task_struct *child) {} +#endif static inline bool klp_have_reliable_stack(void) { return true; }
#ifndef klp_smp_isb diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig index 297ca41c695e..a59cbb6506cb 100644 --- a/kernel/livepatch/Kconfig +++ b/kernel/livepatch/Kconfig @@ -98,5 +98,16 @@ config LIVEPATCH_RESTRICT_KPROBE We should not patch for the functions where registered with kprobe, and vice versa. Say Y here if you want to check those. + +config LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE + bool "No stop_machine in breakpoint optimization mode" + depends on LIVEPATCH_WO_FTRACE + default n + help + In breakpoint optimization mode, check tasks' calltraces + in batches without using stop_machine, so as to reduce + service downtime. + Say N if you are unsure. + endmenu endif diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 345b57b67fbf..3bf53800868e 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -2051,18 +2051,185 @@ static bool klp_use_breakpoint(struct klp_patch *patch) return true; }
-static int klp_breakpoint_optimize(struct klp_patch *patch) +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +#include <linux/sched/task.h> +#include "../sched/sched.h" + +int __weak arch_klp_check_task_calltrace(struct task_struct *t, + bool (*fn)(void *, int *, unsigned long), + void *data) { - int ret; + return -EINVAL; +} + +/* Called from copy_process() during fork */ +void klp_copy_process(struct task_struct *child) +{ + child->patch_state = current->patch_state; +} + +static void set_tasks_patch_state(int patch_state) +{ + unsigned int cpu; + struct task_struct *g, *task; + + read_lock(&tasklist_lock); + for_each_process_thread(g, task) { + task->patch_state = patch_state; + } + read_unlock(&tasklist_lock); + + get_online_cpus(); + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + task->patch_state = patch_state; + } + put_online_cpus(); +} + +static void update_patch_state(struct task_struct *task, struct klp_func_list *func_list) +{ + struct rq *rq; + struct rq_flags flags; + + if (task->patch_state == KLP_PATCHED) + return; + WARN_ON_ONCE(task->patch_state != KLP_UNPATCHED); + rq = task_rq_lock(task, &flags); + if (task_running(rq, task) && task != current) + goto done; + if (arch_klp_check_task_calltrace(task, check_func_list, (void *)func_list)) + goto done; + task->patch_state = KLP_PATCHED; +done: + task_rq_unlock(rq, task, &flags); +} + +#ifdef CONFIG_SMP +static void check_task_calltrace_ipi(void *func_list) +{ + if (current->patch_state == KLP_PATCHED) + return; + if (arch_klp_check_task_calltrace(current, check_func_list, func_list)) + return; + current->patch_state = KLP_PATCHED; +} + +static void update_patch_state_ipi(struct klp_func_list *func_list) +{ + unsigned int cpu; + unsigned int curr_cpu; + + preempt_disable(); + curr_cpu = smp_processor_id(); + for_each_online_cpu(cpu) { + if (cpu == curr_cpu) + continue; + smp_call_function_single(cpu, check_task_calltrace_ipi, func_list, 1); + } + preempt_enable(); +} +#endif + +static void update_tasks_patch_state(struct klp_func_list *func_list) +{ + unsigned int cpu; + struct task_struct *g, *task; + + read_lock(&tasklist_lock); + for_each_process_thread(g, task) + update_patch_state(task, func_list); + read_unlock(&tasklist_lock); + + get_online_cpus(); + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + if (cpu_online(cpu)) { + update_patch_state(task, func_list); + } else if (task->patch_state != KLP_PATCHED) { + /* offline idle tasks can be directly updated */ + task->patch_state = KLP_PATCHED; + } + } + put_online_cpus(); +#ifdef CONFIG_SMP + update_patch_state_ipi(func_list); +#endif +} + +static bool is_patchable(void) +{ + unsigned int cpu; + struct task_struct *g, *task; + int patchable = true; + + get_online_cpus(); + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + WARN_ON_ONCE(task->patch_state == KLP_UNDEFINED); + if (task->patch_state != KLP_PATCHED) { + put_online_cpus(); + return false; + } + } + put_online_cpus(); + read_lock(&tasklist_lock); + for_each_process_thread(g, task) { + WARN_ON_ONCE(task->patch_state == KLP_UNDEFINED); + if (task->patch_state != KLP_PATCHED) { + patchable = false; + goto out_unlock; + } + } +out_unlock: + read_unlock(&tasklist_lock); + return patchable; +} + +static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) +{ + struct klp_func_list *func_list = NULL; + int ret = -EINVAL; int i; - int cnt = 0; + int retry_cnt = 0;
- ret = klp_add_breakpoint(patch); + ret = arch_klp_check_activeness_func(patch, true, add_func_to_list, &func_list); if (ret) { - pr_err("failed to add breakpoints, ret=%d\n", ret); - return ret; + pr_err("break optimize collecting active functions failed, ret=%d\n", ret); + goto out; }
+ set_tasks_patch_state(KLP_UNPATCHED); + + for (i = 0; i < KLP_RETRY_COUNT; i++) { + retry_cnt++; + + update_tasks_patch_state(func_list); + if (is_patchable()) { + arch_klp_code_modify_prepare(); + ret = enable_patch(patch, true); + arch_klp_code_modify_post_process(); + break; + } + ret = -EAGAIN; + pr_notice("try again in %d ms\n", KLP_RETRY_INTERVAL); + msleep(KLP_RETRY_INTERVAL); + } + set_tasks_patch_state(KLP_UNDEFINED); +out: + free_func_list(&func_list); + *cnt = retry_cnt; + return ret; +} + +#else /* !CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE */ + +static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) +{ + int ret = -EINVAL; + int i; + int retry_cnt = 0; + for (i = 0; i < KLP_RETRY_COUNT; i++) { struct patch_data patch_data = { .patch = patch, @@ -2073,20 +2240,37 @@ static int klp_breakpoint_optimize(struct klp_patch *patch) if (i == KLP_RETRY_COUNT - 1) patch_data.rollback = true;
- cnt++; + retry_cnt++;
ret = klp_stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask); if (!ret || ret != -EAGAIN) break;
- pr_notice("try again in %d ms.\n", KLP_RETRY_INTERVAL); + pr_notice("try again in %d ms\n", KLP_RETRY_INTERVAL);
msleep(KLP_RETRY_INTERVAL); } + *cnt = retry_cnt; + return ret; +} +#endif /* CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE */ + +static int klp_breakpoint_optimize(struct klp_patch *patch) +{ + int ret; + int cnt = 0; + + ret = klp_add_breakpoint(patch); + if (ret) { + pr_err("failed to add breakpoints, ret=%d\n", ret); + return ret; + } + + ret = klp_breakpoint_enable_patch(patch, &cnt); + pr_notice("patching %s, tried %d times, ret=%d.\n", ret ? "failed" : "success", cnt, ret); - /* * If the patch is enabled successfully, the breakpoint instruction * has been replaced with the jump instruction. However, if the patch
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Previously, task calltrace checking was assumed to be performed under stop_machine, so none of the checked tasks could be in running state. That left one case where the old function did not need to be checked: preemption disabled, the 'force' field not KLP_NORMAL_FORCE, and no 'call' instructions in the livepatch replace area.
But when using breakpoint optimization without stop_machine, tasks may be running, so we cannot skip the above check.
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- arch/arm/kernel/livepatch.c | 1 + arch/arm64/kernel/livepatch.c | 1 + arch/powerpc/kernel/livepatch_32.c | 1 + arch/powerpc/kernel/livepatch_64.c | 1 + arch/x86/kernel/livepatch.c | 1 + 5 files changed, 5 insertions(+)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index a8e9088ca778..41ee14992b59 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -138,6 +138,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, * complete. */ if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { ret = add_func(func_list, &pcheck, diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index a303fd4a69c9..90932ac5f3f5 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -127,6 +127,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, * complete. */ if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { ret = add_func(func_list, &pcheck, diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index a04b974809b3..65461fbb6c7c 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -128,6 +128,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, * complete. */ if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { ret = add_func(func_list, &pcheck, diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 55821abfff14..8ccf1d6360b5 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -131,6 +131,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, * complete. */ if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { ret = add_func(func_list, &pcheck, diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index b6803cd7e0af..0675357bd16c 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -124,6 +124,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, * complete. */ if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { ret = add_func(func_list, &pcheck,
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
for_each_process_thread() only includes init_task and its descendants, but not the idle tasks. Idle tasks also require calltrace checking.
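Consolidated, the walk that each per-arch diff below implements looks like the sketch here; check_task_calltrace() and klp_is_migration_thread() are the helpers already used in this series (the exact signatures vary slightly per arch), and the wrapper name is illustrative:

static int check_all_calltraces(struct walk_stackframe_args *args,
				int (*fn)(struct stackframe *, void *))
{
	struct task_struct *g, *t;
	unsigned int cpu;
	int ret;

	/* Normal tasks: init_task and every descendant thread. */
	for_each_process_thread(g, t) {
		if (klp_is_migration_thread(t->comm))
			continue;
		ret = check_task_calltrace(t, args, fn);
		if (ret)
			return ret;
	}
	/* Idle tasks are not visited above; check one per online CPU. */
	for_each_online_cpu(cpu) {
		ret = check_task_calltrace(idle_task(cpu), args, fn);
		if (ret)
			return ret;
	}
	return 0;
}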
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- arch/arm/kernel/livepatch.c | 6 ++++++ arch/arm64/kernel/livepatch.c | 6 ++++++ arch/powerpc/kernel/livepatch_32.c | 6 ++++++ arch/powerpc/kernel/livepatch_64.c | 6 ++++++ arch/x86/kernel/livepatch.c | 7 ++++++- 5 files changed, 30 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 41ee14992b59..af298045f67f 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -246,6 +246,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args, { int ret; struct task_struct *g, *t; + unsigned int cpu;
for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) @@ -254,6 +255,11 @@ static int do_check_calltrace(struct walk_stackframe_args *args, if (ret) return ret; } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; + } return 0; }
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 90932ac5f3f5..71a95489855b 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -242,6 +242,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args, { int ret; struct task_struct *g, *t; + unsigned int cpu;
for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) @@ -250,6 +251,11 @@ static int do_check_calltrace(struct walk_stackframe_args *args, if (ret) return ret; } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; + } return 0; }
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 65461fbb6c7c..16f97475ef93 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -280,6 +280,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args, { int ret; struct task_struct *g, *t; + unsigned int cpu;
for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) @@ -288,6 +289,11 @@ static int do_check_calltrace(struct walk_stackframe_args *args, if (ret) return ret; } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; + } return 0; }
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 8ccf1d6360b5..1dcca852618b 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -283,6 +283,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args, { int ret; struct task_struct *g, *t; + unsigned int cpu;
for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) @@ -291,6 +292,11 @@ static int do_check_calltrace(struct walk_stackframe_args *args, if (ret) return ret; } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; + } return 0; }
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 0675357bd16c..b630bab1060a 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -299,6 +299,7 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *da { int ret = 0; struct task_struct *g, *t; + unsigned int cpu;
for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) @@ -308,7 +309,11 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *da if (ret) return ret; } - + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), fn, data); + if (ret) + return ret; + } return 0; }
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Struct 'klp_func_list' is used to organize active functions in a singly linked list, which is needlessly complicated.
Rename it to 'actv_func' and organize it with 'list_head' to make the code clearer, as sketched below.
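For reference, a minimal sketch of the 'list_head' idiom the patch switches to, using the names from the diff below: the links are embedded in each entry and the standard helpers handle appending and safe teardown, so no tail pointer has to be threaded through the callers.

struct actv_func {
	struct list_head list;	/* embedded list node */
	unsigned long func_addr;
	unsigned long func_size;
	const char *func_name;
	int force;
};

static void free_func_list(struct list_head *func_list)
{
	struct actv_func *func = NULL;
	struct actv_func *tmp = NULL;

	/* The _safe variant allows deleting entries while iterating. */
	list_for_each_entry_safe(func, tmp, func_list, list) {
		list_del(&func->list);
		kfree(func);
	}
}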
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- arch/arm/kernel/livepatch.c | 12 ++--- arch/arm64/kernel/livepatch.c | 12 ++--- arch/powerpc/kernel/livepatch_32.c | 10 ++-- arch/powerpc/kernel/livepatch_64.c | 12 ++--- arch/x86/kernel/livepatch.c | 10 ++-- include/linux/livepatch.h | 10 +--- kernel/livepatch/core.c | 79 +++++++++++++++--------------- 7 files changed, 62 insertions(+), 83 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index af298045f67f..46adc986eb2e 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -84,7 +84,7 @@ static bool check_jump_insn(unsigned long func_addr) }
int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct klp_func_list **func_list) + klp_add_func_t add_func, struct list_head *func_list) { int ret; struct klp_object *obj; @@ -92,7 +92,6 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, struct klp_func *func; unsigned long func_addr = 0; unsigned long func_size; - struct klp_func_list *pcheck = NULL;
for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -141,8 +140,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, func->force); if (ret) return ret; @@ -183,8 +181,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -197,8 +194,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, #endif func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 71a95489855b..45ac28b0f448 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -77,7 +77,7 @@ static bool check_jump_insn(unsigned long func_addr) }
int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct klp_func_list **func_list) + klp_add_func_t add_func, struct list_head *func_list) { int ret; struct klp_object *obj; @@ -85,7 +85,6 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, unsigned long func_addr = 0; unsigned long func_size; struct klp_func_node *func_node; - struct klp_func_list *pcheck = NULL;
for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -130,8 +129,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, func->force); if (ret) return ret; @@ -173,8 +171,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -188,8 +185,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 16f97475ef93..424c0c972ff7 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -74,7 +74,7 @@ static bool check_jump_insn(unsigned long func_addr) }
int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct klp_func_list **func_list) + klp_add_func_t add_func, struct list_head *func_list) { int ret; struct klp_object *obj; @@ -82,7 +82,6 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, unsigned long func_addr = 0; unsigned long func_size; struct klp_func_node *func_node; - struct klp_func_list *pcheck = NULL;
for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -131,8 +130,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, func->force); if (ret) return ret; @@ -173,7 +171,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -186,7 +184,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, #endif func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 1dcca852618b..04a0508d09c8 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -79,14 +79,13 @@ static bool check_jump_insn(unsigned long func_addr) }
int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct klp_func_list **func_list) + klp_add_func_t add_func, struct list_head *func_list) { int ret; struct klp_object *obj; struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL; - struct klp_func_list *pcheck = NULL;
for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -134,8 +133,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, func->force); if (ret) return ret; @@ -148,7 +146,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = ppc_function_entry( (void *)func->new_func); func_size = func->new_size; - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -174,7 +172,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, */ func_addr = (unsigned long)func->old_func; func_size = func->old_size; - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, "OLD_FUNC", 0); if (ret) return ret; @@ -186,7 +184,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
func_addr = (unsigned long)&func_node->arch_data.trampoline; func_size = sizeof(struct ppc64_klp_btramp_entry); - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, "trampoline", 0); if (ret) return ret; diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index b630bab1060a..04aa07e95f46 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -73,7 +73,7 @@ static bool check_jump_insn(unsigned long func_addr) }
int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct klp_func_list **func_list) + klp_add_func_t add_func, struct list_head *func_list) { int ret; struct klp_object *obj; @@ -81,7 +81,6 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, unsigned long func_addr = 0; unsigned long func_size; struct klp_func_node *func_node = NULL; - struct klp_func_list *pcheck = NULL;
for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -127,8 +126,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, func->force); if (ret) return ret; @@ -167,7 +165,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -181,7 +179,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 7146989b5fbc..427485f73793 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -259,15 +259,7 @@ int klp_compare_address(unsigned long pc, unsigned long func_addr, void arch_klp_init(void); int klp_module_delete_safety_check(struct module *mod);
-struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - -typedef int (*klp_add_func_t)(struct klp_func_list **funcs, struct klp_func_list **func, +typedef int (*klp_add_func_t)(struct list_head *func_list, unsigned long func_addr, unsigned long func_size, const char *func_name, int force);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 3bf53800868e..d47d243499b3 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1346,7 +1346,7 @@ int __weak arch_klp_check_calltrace(bool (*fn)(void *, int *, unsigned long), vo
int __weak arch_klp_check_activeness_func(struct klp_patch *patch, int enable, klp_add_func_t add_func, - struct klp_func_list **func_list) + struct list_head *func_list) { return -EINVAL; } @@ -1361,58 +1361,59 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; }
+struct actv_func { + struct list_head list; + unsigned long func_addr; + unsigned long func_size; + const char *func_name; + int force; +}; + static bool check_func_list(void *data, int *ret, unsigned long pc) { - struct klp_func_list *funcs = (struct klp_func_list *)data; + struct list_head *func_list = (struct list_head *)data; + struct actv_func *func = NULL;
- while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); + list_for_each_entry(func, func_list, list) { + *ret = klp_compare_address(pc, func->func_addr, func->func_name, + klp_size_to_check(func->func_size, func->force)); if (*ret) return false; - funcs = funcs->next; } return true; }
-static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) +static int add_func_to_list(struct list_head *func_list, unsigned long func_addr, + unsigned long func_size, const char *func_name, + int force) { - if (*func == NULL) { - *funcs = kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; + struct actv_func *func = kzalloc(sizeof(struct actv_func), GFP_ATOMIC); + + if (!func) + return -ENOMEM; + func->func_addr = func_addr; + func->func_size = func_size; + func->func_name = func_name; + func->force = force; + list_add_tail(&func->list, func_list); return 0; }
-static void free_func_list(struct klp_func_list **funcs) +static void free_func_list(struct list_head *func_list) { - struct klp_func_list *p; + struct actv_func *func = NULL; + struct actv_func *tmp = NULL;
- while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); + list_for_each_entry_safe(func, tmp, func_list, list) { + list_del(&func->list); + kfree(func); } }
static int klp_check_calltrace(struct klp_patch *patch, int enable) { int ret = 0; - struct klp_func_list *func_list = NULL; + LIST_HEAD(func_list);
ret = arch_klp_check_activeness_func(patch, enable, add_func_to_list, &func_list); if (ret) { @@ -1420,10 +1421,10 @@ static int klp_check_calltrace(struct klp_patch *patch, int enable) goto out; }
- if (!func_list) + if (list_empty(&func_list)) goto out;
- ret = arch_klp_check_calltrace(check_func_list, (void *)func_list); + ret = arch_klp_check_calltrace(check_func_list, (void *)&func_list);
out: free_func_list(&func_list); @@ -2087,7 +2088,7 @@ static void set_tasks_patch_state(int patch_state) put_online_cpus(); }
-static void update_patch_state(struct task_struct *task, struct klp_func_list *func_list) +static void update_patch_state(struct task_struct *task, struct list_head *func_list) { struct rq *rq; struct rq_flags flags; @@ -2115,7 +2116,7 @@ static void check_task_calltrace_ipi(void *func_list) current->patch_state = KLP_PATCHED; }
-static void update_patch_state_ipi(struct klp_func_list *func_list) +static void update_patch_state_ipi(struct list_head *func_list) { unsigned int cpu; unsigned int curr_cpu; @@ -2131,7 +2132,7 @@ static void update_patch_state_ipi(struct klp_func_list *func_list) } #endif
-static void update_tasks_patch_state(struct klp_func_list *func_list) +static void update_tasks_patch_state(struct list_head *func_list) { unsigned int cpu; struct task_struct *g, *task; @@ -2188,7 +2189,7 @@ static bool is_patchable(void)
static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) { - struct klp_func_list *func_list = NULL; + LIST_HEAD(func_list); int ret = -EINVAL; int i; int retry_cnt = 0; @@ -2204,7 +2205,7 @@ static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) for (i = 0; i < KLP_RETRY_COUNT; i++) { retry_cnt++;
- update_tasks_patch_state(func_list); + update_tasks_patch_state(&func_list); if (is_patchable()) { arch_klp_code_modify_prepare(); ret = enable_patch(patch, true);
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Fix a cmetrics warning like the one below: cmetrics-the depth of the method {arch_klp_check_activeness_func()} is 6, it is over 4
At the same time, the arch_klp_check_activeness_func() implementations in x86/arm/arm64/ppc32 are almost the same, so move the function out of arch code to reduce duplication.
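After the move, the core keeps the common walk and each arch only exports the small instruction test (check_jump_insn() becomes arch_check_jump_insn() in the diffs below). A sketch of the resulting split; the __weak fallback shown here is an illustrative assumption, not necessarily how the series wires it:

/* Per-arch hook: do the instructions to be replaced at func_addr
 * contain a jump/call that could leave the patched range? */
bool arch_check_jump_insn(unsigned long func_addr);

/* Illustrative conservative default (an assumption, not from the
 * series): assume yes, so the range always gets a calltrace check. */
bool __weak arch_check_jump_insn(unsigned long func_addr)
{
	return true;
}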
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com> --- arch/arm/kernel/livepatch.c | 123 +----------------- arch/arm64/kernel/livepatch.c | 121 +----------------- arch/powerpc/kernel/livepatch_32.c | 123 +----------------- arch/powerpc/kernel/livepatch_64.c | 193 ++++++++++++++--------------- arch/x86/kernel/livepatch.c | 119 +----------------- kernel/livepatch/core.c | 122 +++++++++++++++++- 6 files changed, 213 insertions(+), 588 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 46adc986eb2e..3379fbf16dd4 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -69,7 +69,7 @@ struct walk_stackframe_args { bool (*check_func)(void *data, int *ret, unsigned long pc); };
-static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; u32 *insn = (u32*)func_addr; @@ -83,127 +83,6 @@ static bool check_jump_insn(unsigned long func_addr) return false; }
-int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct list_head *func_list) -{ - int ret; - struct klp_object *obj; - struct klp_func_node *func_node; - struct klp_func *func; - unsigned long func_addr = 0; - unsigned long func_size; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - unsigned long old_func = (unsigned long)func->old_func; - - if (enable) { - bool need_check_old = false; - - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node || - list_empty(&func_node->func_stack)) { - /* - * No patched on this function - * [ the origin one ] - */ - func_addr = old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [ the active one ] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemption is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of intructions to be replaced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func(func_list, func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - need_check_old = (func_addr != old_func); - } - if (need_check_old) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the previously - * patched function and the function itself - * which to be unpatched. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node) - return -EINVAL; -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func(func_list, func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; - if (func_addr != old_func) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, 0); - if (ret) - return ret; - } -#endif - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func(func_list, func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - static int klp_check_jump_func(struct stackframe *frame, void *ws_args) { struct walk_stackframe_args *args = ws_args; diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 45ac28b0f448..6675569c8a4b 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -62,7 +62,7 @@ struct walk_stackframe_args { bool (*check_func)(void *data, int *ret, unsigned long pc); };
-static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; u32 *insn = (u32*)func_addr; @@ -76,125 +76,6 @@ static bool check_jump_insn(unsigned long func_addr) return false; }
-int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct list_head *func_list) -{ - int ret; - struct klp_object *obj; - struct klp_func *func; - unsigned long func_addr = 0; - unsigned long func_size; - struct klp_func_node *func_node; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - unsigned long old_func = (unsigned long)func->old_func; - - if (enable) { - bool need_check_old = false; - - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node || - list_empty(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [the active one] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemption is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be replaced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func(func_list, func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - need_check_old = (func_addr != old_func); - } - if (need_check_old) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the previously - * patched function and the function itself - * which to be unpatched. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node) { - return -EINVAL; - } -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func(func_list, func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; - if (func_addr != old_func) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, 0); - if (ret) - return ret; - } -#endif - - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func(func_list, func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - static bool klp_check_jump_func(void *ws_args, unsigned long pc) { struct walk_stackframe_args *args = ws_args; diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 424c0c972ff7..94aa7af22d34 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -59,7 +59,7 @@ struct walk_stackframe_args { bool (*check_func)(void *data, int *ret, unsigned long pc); };
-static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; u32 *insn = (u32*)func_addr; @@ -73,127 +73,6 @@ static bool check_jump_insn(unsigned long func_addr) return false; }
-int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct list_head *func_list) -{ - int ret; - struct klp_object *obj; - struct klp_func *func; - unsigned long func_addr = 0; - unsigned long func_size; - struct klp_func_node *func_node; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - unsigned long old_func = (unsigned long)func->old_func; - - if (enable) { - bool need_check_old = false; - - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node || - list_empty(&func_node->func_stack)) { - /* - * No patched on this function - * [ the origin one ] - */ - func_addr = old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [ the active one ] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemtion is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be replaced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func(func_list, func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - need_check_old = (func_addr != old_func); - } - if (need_check_old) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the previously - * patched function and the function itself - * which to be unpatched. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node) - return -EINVAL; -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func(func_list, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - if (func_addr != old_func) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, 0); - if (ret) - return ret; - } -#endif - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func(func_list, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - void notrace klp_walk_stackframe(struct stackframe *frame, int (*fn)(struct stackframe *, void *), struct task_struct *tsk, void *data) diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 04a0508d09c8..9a968c7616c7 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -78,120 +78,109 @@ static bool check_jump_insn(unsigned long func_addr) return false; }
-int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, +int arch_klp_check_activeness_func(struct klp_func *func, int enable, klp_add_func_t add_func, struct list_head *func_list) { int ret; - struct klp_object *obj; - struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL;
- for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - func_node = klp_find_func_node(func->old_func); - - /* Check func address in stack */ - if (enable) { - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - if (!func_node || - list_empty(&func_node->func_stack)) { - /* - * No patched on this function - * [ the origin one ] - */ - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [ the active one ] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = ppc_function_entry( - (void *)prev->new_func); - func_size = prev->new_size; - } - /* - * When preemption is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be repalced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func(func_list, func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the function itself - * which to be unpatched. - */ - func_addr = ppc_function_entry( - (void *)func->new_func); - func_size = func->new_size; - ret = add_func(func_list, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - } - -#ifdef PPC64_ELF_ABI_v1 + func_node = klp_find_func_node(func->old_func); + /* Check func address in stack */ + if (enable) { + if (func->patched || func->force == KLP_ENFORCEMENT) + return 0; + /* + * When enable, checking the currently + * active functions. + */ + if (!func_node || list_empty(&func_node->func_stack)) { /* - * Check trampoline in stack - * new_func callchain: - * old_func - * -=> trampoline - * -=> new_func - * so, we should check all the func in the callchain + * No patched on this function + * [ the origin one ] */ - if (func_addr != (unsigned long)func->old_func) { + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + } else { + /* + * Previously patched function + * [ the active one ] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + func_addr = ppc_function_entry((void *)prev->new_func); + func_size = prev->new_size; + } + /* + * When preemption is disabled and the + * replacement area does not contain a jump + * instruction, the migration thread is + * scheduled to run stop machine only after the + * excution of instructions to be repalced is + * complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || + (func->force == KLP_NORMAL_FORCE) || + check_jump_insn(func_addr)) { + ret = add_func(func_list, func_addr, func_size, + func->old_name, func->force); + if (ret) + return ret; + } + } else { + /* + * When disable, check for the function itself + * which to be unpatched. 
+ */ + func_addr = ppc_function_entry((void *)func->new_func); + func_size = func->new_size; + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + } + +#ifdef PPC64_ELF_ABI_v1 + /* + * Check trampoline in stack + * new_func callchain: + * old_func + * -=> trampoline + * -=> new_func + * so, we should check all the func in the callchain + */ + if (func_addr != (unsigned long)func->old_func) { #ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - ret = add_func(func_list, func_addr, - func_size, "OLD_FUNC", 0); - if (ret) - return ret; + /* + * No scheduling point in the replacement + * instructions. Therefore, when preemption is + * not enabled, atomic execution is performed + * and these instructions will not appear on + * the stack. + */ + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + ret = add_func(func_list, func_addr, + func_size, "OLD_FUNC", 0); + if (ret) + return ret; #endif
- if (func_node == NULL || - func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) - continue; - - func_addr = (unsigned long)&func_node->arch_data.trampoline; - func_size = sizeof(struct ppc64_klp_btramp_entry); - ret = add_func(func_list, func_addr, - func_size, "trampoline", 0); - if (ret) - return ret; - } -#endif - } + if (func_node == NULL || + func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) + return 0; + + func_addr = (unsigned long)&func_node->arch_data.trampoline; + func_size = sizeof(struct ppc64_klp_btramp_entry); + ret = add_func(func_list, func_addr, + func_size, "trampoline", 0); + if (ret) + return ret; } +#endif return 0; }
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 04aa07e95f46..99b72629637d 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -52,7 +52,7 @@ static bool is_jump_insn(u8 *insn) return false; }
-static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { int len = JMP_E9_INSN_SIZE; struct insn insn; @@ -72,123 +72,6 @@ static bool check_jump_insn(unsigned long func_addr) return false; }
-int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct list_head *func_list) -{ - int ret; - struct klp_object *obj; - struct klp_func *func; - unsigned long func_addr = 0; - unsigned long func_size; - struct klp_func_node *func_node = NULL; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - unsigned long old_func = (unsigned long)func->old_func; - - func_node = klp_find_func_node(func->old_func); - /* Check func address in stack */ - if (enable) { - bool need_check_old = false; - - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - if (!func_node || - list_empty(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [the active one] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemtion is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be replaced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func(func_list, func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - need_check_old = (func_addr != old_func); - } - if (need_check_old) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the function - * itself which to be unpatched. - */ - if (!func_node) - return -EINVAL; -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func(func_list, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - if (func_addr != old_func) { - ret = add_func_to_list(check_funcs, &pcheck, old_func, - func->old_size, func->old_name, 0); - if (ret) - return ret; - } -#endif - - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func(func_list, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - static void klp_print_stack_trace(void *trace_ptr, int trace_len) { int i; diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index d47d243499b3..9284f5076858 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1344,11 +1344,106 @@ int __weak arch_klp_check_calltrace(bool (*fn)(void *, int *, unsigned long), vo return -EINVAL; }
-int __weak arch_klp_check_activeness_func(struct klp_patch *patch, int enable, +bool __weak arch_check_jump_insn(unsigned long func_addr) +{ + return true; +} + +int __weak arch_klp_check_activeness_func(struct klp_func *func, int enable, klp_add_func_t add_func, struct list_head *func_list) { - return -EINVAL; + int ret; + unsigned long func_addr = 0; + unsigned long func_size; + struct klp_func_node *func_node = NULL; + unsigned long old_func = (unsigned long)func->old_func; + + func_node = klp_find_func_node(func->old_func); + /* Check func address in stack */ + if (enable) { + if (func->patched || func->force == KLP_ENFORCEMENT) + return 0; + /* + * When enable, checking the currently active functions. + */ + if (list_empty(&func_node->func_stack)) { + /* + * Not patched on this function [the origin one] + */ + func_addr = old_func; + func_size = func->old_size; + } else { + /* + * Previously patched function [the active one] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + /* + * When preemption is disabled and the replacement area + * does not contain a jump instruction, the migration + * thread is scheduled to run stop machine only after the + * execution of instructions to be replaced is complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || + (func->force == KLP_NORMAL_FORCE) || + arch_check_jump_insn(func_addr)) { + ret = add_func(func_list, func_addr, func_size, + func->old_name, func->force); + if (ret) + return ret; + if (func_addr != old_func) { + ret = add_func(func_list, old_func, KLP_MAX_REPLACE_SIZE, + func->old_name, func->force); + if (ret) + return ret; + } + } + } else { +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement instructions. Therefore, + * when preemption is not enabled, atomic execution is performed + * and these instructions will not appear on the stack. + */ + if (list_is_singular(&func_node->func_stack)) { + func_addr = old_func; + func_size = func->old_size; + } else { + struct klp_func *prev; + + prev = list_first_or_null_rcu( + &func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + if (func_addr != old_func) { + ret = add_func(func_list, old_func, KLP_MAX_REPLACE_SIZE, + func->old_name, 0); + if (ret) + return ret; + } +#endif + + func_addr = (unsigned long)func->new_func; + func_size = func->new_size; + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + } + return 0; }
static inline unsigned long klp_size_to_check(unsigned long func_size, @@ -1410,12 +1505,31 @@ static void free_func_list(struct list_head *func_list) } }
+static int klp_check_activeness_func(struct klp_patch *patch, int enable, + struct list_head *func_list) +{ + int ret; + struct klp_object *obj = NULL; + struct klp_func *func = NULL; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + ret = arch_klp_check_activeness_func(func, enable, + add_func_to_list, + func_list); + if (ret) + return ret; + } + } + return 0; +} + static int klp_check_calltrace(struct klp_patch *patch, int enable) { int ret = 0; LIST_HEAD(func_list);
- ret = arch_klp_check_activeness_func(patch, enable, add_func_to_list, &func_list); + ret = klp_check_activeness_func(patch, enable, &func_list); if (ret) { pr_err("collect active functions failed, ret=%d\n", ret); goto out; @@ -2194,7 +2308,7 @@ static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) int i; int retry_cnt = 0;
- ret = arch_klp_check_activeness_func(patch, true, add_func_to_list, &func_list); + ret = klp_check_activeness_func(patch, true, &func_list); if (ret) { pr_err("break optimize collecting active functions failed, ret=%d\n", ret); goto out;
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
By the time arch_klp_check_activeness_func() is reached, the func_node
corresponding to func->old_func has already been stored in func->func_node
and is guaranteed to be valid, so there is no need to look up func_node
again or validate it again:

__klp_enable_patch()
  klp_mem_prepare()
    func_node_alloc                       // 1. Alloc func->func_node for func->old_func
klp_try_enable_patch()
  klp_check_calltrace()
    arch_klp_check_activeness_func()      // 2. Access func_node found by func->old_func
  klp_breakpoint_optimize()
    klp_breakpoint_enable_patch()
      ...
        arch_klp_check_activeness_func()  // 3. Access func_node found by func->old_func
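For illustration, a minimal sketch of the access pattern before and after
this change; the helper names check_active_old()/check_active_new() are
hypothetical, only klp_find_func_node() and the func->func_node field come
from the code in this series:

	/* Before: re-resolve and re-validate the node on every call. */
	static int check_active_old(struct klp_func *func)
	{
		struct klp_func_node *func_node = klp_find_func_node(func->old_func);

		if (!func_node)	/* cannot happen once func_node_alloc() has run */
			return -EINVAL;
		/* ... walk func_node->func_stack ... */
		return 0;
	}

	/* After: simply dereference the pointer cached by func_node_alloc(). */
	static int check_active_new(struct klp_func *func)
	{
		struct klp_func_node *func_node = func->func_node;

		/* ... walk func_node->func_stack ... */
		return 0;
	}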
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/powerpc/kernel/livepatch_64.c | 7 +++----
 include/linux/livepatch.h          | 1 -
 kernel/livepatch/core.c            | 4 ++--
 3 files changed, 5 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 9a968c7616c7..9000534b3c9f 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -85,7 +85,7 @@ int arch_klp_check_activeness_func(struct klp_func *func, int enable, unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL;
- func_node = klp_find_func_node(func->old_func); + func_node = func->func_node; /* Check func address in stack */ if (enable) { if (func->patched || func->force == KLP_ENFORCEMENT) @@ -94,7 +94,7 @@ int arch_klp_check_activeness_func(struct klp_func *func, int enable, * When enable, checking the currently * active functions. */ - if (!func_node || list_empty(&func_node->func_stack)) { + if (list_empty(&func_node->func_stack)) { /* * No patched on this function * [ the origin one ] @@ -169,8 +169,7 @@ int arch_klp_check_activeness_func(struct klp_func *func, int enable, return ret; #endif
- if (func_node == NULL || - func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) + if (func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) return 0;
func_addr = (unsigned long)&func_node->arch_data.trampoline; diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 427485f73793..38d707b9b4e1 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -239,7 +239,6 @@ struct klp_func_node { void *brk_func; };
-struct klp_func_node *klp_find_func_node(const void *old_func); void klp_add_func_node(struct klp_func_node *func_node); void klp_del_func_node(struct klp_func_node *func_node); void *klp_get_brk_func(void *addr); diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 9284f5076858..2ccd00113085 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1359,7 +1359,7 @@ int __weak arch_klp_check_activeness_func(struct klp_func *func, int enable, struct klp_func_node *func_node = NULL; unsigned long old_func = (unsigned long)func->old_func;
- func_node = klp_find_func_node(func->old_func); + func_node = func->func_node; /* Check func address in stack */ if (enable) { if (func->patched || func->force == KLP_ENFORCEMENT) @@ -1551,7 +1551,7 @@ static LIST_HEAD(klp_func_list); * The caller must ensure that the klp_mutex lock is held or is in the rcu read * critical area. */ -struct klp_func_node *klp_find_func_node(const void *old_func) +static struct klp_func_node *klp_find_func_node(const void *old_func) { struct klp_func_node *func_node;
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
klp_add_func_node() & klp_del_func_node() should be static.
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 include/linux/livepatch.h | 2 --
 kernel/livepatch/core.c   | 4 ++--
 2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 38d707b9b4e1..b11d4afed635 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -239,8 +239,6 @@ struct klp_func_node { void *brk_func; };
-void klp_add_func_node(struct klp_func_node *func_node); -void klp_del_func_node(struct klp_func_node *func_node); void *klp_get_brk_func(void *addr);
static inline diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 2ccd00113085..fdf00f2a6d84 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1564,12 +1564,12 @@ static struct klp_func_node *klp_find_func_node(const void *old_func) return NULL; }
-void klp_add_func_node(struct klp_func_node *func_node) +static void klp_add_func_node(struct klp_func_node *func_node) { list_add_rcu(&func_node->node, &klp_func_list); }
-void klp_del_func_node(struct klp_func_node *func_node) +static void klp_del_func_node(struct klp_func_node *func_node) { list_del_rcu(&func_node->node); }
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Remove the braces around single-statement if branches.
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm/kernel/livepatch.c        | 3 +--
 arch/arm64/kernel/livepatch.c      | 3 +--
 arch/powerpc/kernel/livepatch_32.c | 3 +--
 arch/powerpc/kernel/livepatch_64.c | 3 +--
 4 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 3379fbf16dd4..a5f4c770990f 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -75,9 +75,8 @@ bool arch_check_jump_insn(unsigned long func_addr) u32 *insn = (u32*)func_addr;
for (i = 0; i < CHECK_JUMP_RANGE; i++) { - if (is_jump_insn(*insn)) { + if (is_jump_insn(*insn)) return true; - } insn++; } return false; diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 6675569c8a4b..258f1dcda945 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -68,9 +68,8 @@ bool arch_check_jump_insn(unsigned long func_addr) u32 *insn = (u32*)func_addr;
for (i = 0; i < CHECK_JUMP_RANGE; i++) { - if (is_jump_insn(*insn)) { + if (is_jump_insn(*insn)) return true; - } insn++; } return false; diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 94aa7af22d34..0a3b484efbba 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -65,9 +65,8 @@ bool arch_check_jump_insn(unsigned long func_addr) u32 *insn = (u32*)func_addr;
for (i = 0; i < CHECK_JUMP_RANGE; i++) { - if (is_jump_insn(*insn)) { + if (is_jump_insn(*insn)) return true; - } insn++; } return false; diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 9000534b3c9f..894fadb647e8 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -70,9 +70,8 @@ static bool check_jump_insn(unsigned long func_addr) u32 *insn = (u32*)func_addr;
for (i = 0; i < CHECK_JUMP_RANGE; i++) { - if (is_jump_insn(*insn)) { + if (is_jump_insn(*insn)) return true; - } insn++; } return false;
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
The definition of 'struct walk_stackframe_args' is the same in
arm/arm64/powerpc32/powerpc64, so move it into include/linux/livepatch.h.
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm/kernel/livepatch.c        | 6 ------
 arch/arm64/kernel/livepatch.c      | 6 ------
 arch/powerpc/kernel/livepatch_32.c | 6 ------
 arch/powerpc/kernel/livepatch_64.c | 6 ------
 include/linux/livepatch.h          | 6 ++++++
 5 files changed, 6 insertions(+), 24 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index a5f4c770990f..f37cc04b4cae 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -63,12 +63,6 @@ static bool is_jump_insn(u32 insn) return false; }
-struct walk_stackframe_args { - void *data; - int ret; - bool (*check_func)(void *data, int *ret, unsigned long pc); -}; - bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 258f1dcda945..363fb8e41c49 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -56,12 +56,6 @@ static inline bool offset_in_range(unsigned long pc, unsigned long addr, ((le32_to_cpu(insn) & 0xfc000000) == 0x94000000) || \ ((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800))
-struct walk_stackframe_args { - void *data; - int ret; - bool (*check_func)(void *data, int *ret, unsigned long pc); -}; - bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 0a3b484efbba..3134553820cd 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -53,12 +53,6 @@ static bool is_jump_insn(u32 insn) return false; }
-struct walk_stackframe_args { - void *data; - int ret; - bool (*check_func)(void *data, int *ret, unsigned long pc); -}; - bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 894fadb647e8..522e3d8e1fe6 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -58,12 +58,6 @@ static bool is_jump_insn(u32 insn) return false; }
-struct walk_stackframe_args { - void *data; - int ret; - bool (*check_func)(void *data, int *ret, unsigned long pc); -}; - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index b11d4afed635..a5ef153bae8d 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -260,6 +260,12 @@ typedef int (*klp_add_func_t)(struct list_head *func_list, unsigned long func_addr, unsigned long func_size, const char *func_name, int force);
+struct walk_stackframe_args { + void *data; + int ret; + bool (*check_func)(void *data, int *ret, unsigned long pc); +}; + #endif
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
mainline inclusion
from mainline-v6.10-rc1
commit e60b613df8b6253def41215402f72986fee3fc8d
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
------------------------------------------------------
KASAN reports a bug:
BUG: KASAN: use-after-free in ftrace_location+0x90/0x120
Read of size 8 at addr ffff888141d40010 by task insmod/424
CPU: 8 PID: 424 Comm: insmod Tainted: G        W          6.9.0-rc2+
[...]
Call Trace:
 <TASK>
 dump_stack_lvl+0x68/0xa0
 print_report+0xcf/0x610
 kasan_report+0xb5/0xe0
 ftrace_location+0x90/0x120
 register_kprobe+0x14b/0xa40
 kprobe_init+0x2d/0xff0 [kprobe_example]
 do_one_initcall+0x8f/0x2d0
 do_init_module+0x13a/0x3c0
 load_module+0x3082/0x33d0
 init_module_from_file+0xd2/0x130
 __x64_sys_finit_module+0x306/0x440
 do_syscall_64+0x68/0x140
 entry_SYSCALL_64_after_hwframe+0x71/0x79
The root cause is that, in lookup_rec(), the ftrace record for an address
may be searched in the ftrace pages of a module while those pages are
simultaneously being freed in ftrace_release_mod() as the corresponding
module is being deleted:

           CPU1                            |      CPU2
  register_kprobes() {                     | delete_module() {
    check_kprobe_address_safe() {          |
      arch_check_ftrace_location() {       |
        ftrace_location() {                |
          lookup_rec() // USE!             |   ftrace_release_mod() // Free!

To fix this issue:
  1. Hold the RCU read lock while accessing ftrace pages in
     ftrace_location_range();
  2. Use ftrace_location_range() instead of lookup_rec() in
     ftrace_location();
  3. Call synchronize_rcu() before freeing any ftrace pages, in
     ftrace_process_locs()/ftrace_release_mod()/ftrace_free_mem().
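For clarity, a minimal sketch of the reader/updater pairing the fix relies
on; it mirrors the ftrace_location_range() and page-freeing hunks below,
with unrelated ftrace internals reduced to comments (free_unlinked_pages()
is a hypothetical name):

	/* Reader: the lookup now walks ftrace pages only inside an RCU
	 * read-side critical section. */
	unsigned long ftrace_location_range(unsigned long start, unsigned long end)
	{
		struct dyn_ftrace *rec;
		unsigned long ip = 0;

		rcu_read_lock();
		rec = lookup_rec(start, end);	/* may touch pages being unlinked */
		if (rec)
			ip = rec->ip;
		rcu_read_unlock();

		return ip;
	}

	/* Updater: unlink the pages first, then wait for every in-flight
	 * reader to finish before actually freeing them. */
	static void free_unlinked_pages(struct ftrace_page *tmp_page)
	{
		if (tmp_page) {
			synchronize_rcu();	/* all rcu_read_lock() readers done */
			ftrace_free_pages(tmp_page);
		}
	}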
Link: https://lore.kernel.org/linux-trace-kernel/20240509192859.1273558-1-zhengyej...
Cc: stable@vger.kernel.org
Cc: mhiramat@kernel.org
Cc: mark.rutland@arm.com
Cc: mathieu.desnoyers@efficios.com
Fixes: ae6aa16fdc16 ("kprobes: introduce ftrace based optimization")
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 kernel/trace/ftrace.c | 39 +++++++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b2888890add0..6b7deaaecc51 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1567,12 +1567,15 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end) unsigned long ftrace_location_range(unsigned long start, unsigned long end) { struct dyn_ftrace *rec; + unsigned long ip = 0;
+ rcu_read_lock(); rec = lookup_rec(start, end); if (rec) - return rec->ip; + ip = rec->ip; + rcu_read_unlock();
- return 0; + return ip; }
/** @@ -1585,25 +1588,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end) */ unsigned long ftrace_location(unsigned long ip) { - struct dyn_ftrace *rec; + unsigned long loc; unsigned long offset; unsigned long size;
- rec = lookup_rec(ip, ip); - if (!rec) { + loc = ftrace_location_range(ip, ip); + if (!loc) { if (!kallsyms_lookup_size_offset(ip, &size, &offset)) goto out;
/* map sym+0 to __fentry__ */ if (!offset) - rec = lookup_rec(ip, ip + size - 1); + loc = ftrace_location_range(ip, ip + size - 1); }
- if (rec) - return rec->ip; - out: - return 0; + return loc; }
/** @@ -6347,6 +6347,8 @@ static int ftrace_process_locs(struct module *mod, /* We should have used all pages unless we skipped some */ if (pg_unuse) { WARN_ON(!skipped); + /* Need to synchronize with ftrace_location_range() */ + synchronize_rcu(); ftrace_free_pages(pg_unuse); } return ret; @@ -6529,6 +6531,9 @@ void ftrace_release_mod(struct module *mod) out_unlock: mutex_unlock(&ftrace_lock);
+ /* Need to synchronize with ftrace_location_range() */ + if (tmp_page) + synchronize_rcu(); for (pg = tmp_page; pg; pg = tmp_page) {
/* Needs to be called outside of ftrace_lock */ @@ -6851,6 +6856,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) unsigned long start = (unsigned long)(start_ptr); unsigned long end = (unsigned long)(end_ptr); struct ftrace_page **last_pg = &ftrace_pages_start; + struct ftrace_page *tmp_page = NULL; struct ftrace_page *pg; struct dyn_ftrace *rec; struct dyn_ftrace key; @@ -6894,12 +6900,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) ftrace_update_tot_cnt--; if (!pg->index) { *last_pg = pg->next; - if (pg->records) { - free_pages((unsigned long)pg->records, pg->order); - ftrace_number_of_pages -= 1 << pg->order; - } - ftrace_number_of_groups--; - kfree(pg); + pg->next = tmp_page; + tmp_page = pg; pg = container_of(last_pg, struct ftrace_page, next); if (!(*last_pg)) ftrace_pages = pg; @@ -6916,6 +6918,11 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) clear_func_from_hashes(func); kfree(func); } + /* Need to synchronize with ftrace_location_range() */ + if (tmp_page) { + synchronize_rcu(); + ftrace_free_pages(tmp_page); + } }
void __init ftrace_free_init_mem(void)
mainline inclusion
from mainline-v6.9-rc4
commit 325f3fb551f8cd672dbbfc4cf58b14f9ee3fc9e8
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
------------------------------------------------------
When a module is being unloaded, its state changes MODULE_STATE_LIVE ->
MODULE_STATE_GOING -> MODULE_STATE_UNFORMED, and each transition takes some
time. `is_module_text_address()` and `__module_text_address()` work with
MODULE_STATE_LIVE and MODULE_STATE_GOING. If we use
`is_module_text_address()` and `__module_text_address()` separately, there
is a chance that the first one succeeds but the second one fails because
module->state becomes MODULE_STATE_UNFORMED between the two calls.

In `check_kprobe_address_safe()`, if the second `__module_text_address()`
fails, the failure is ignored because a kernel text address is assumed. But
it may have failed simply because module->state changed to
MODULE_STATE_UNFORMED, in which case arm_kprobe() will try to modify module
text that no longer exists (use-after-free).

To fix this problem, do not use `is_module_text_address()` and
`__module_text_address()` separately; instead, call
`__module_text_address()` once and take a reference with
`try_module_get(module)`, which only succeeds while the module is
MODULE_STATE_LIVE.
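A minimal sketch of the fixed pattern, under the assumption that the caller
disables preemption around the lookup as check_kprobe_address_safe() does;
pin_text_module() is a hypothetical helper name:

	static struct module *pin_text_module(unsigned long addr)
	{
		struct module *mod;

		preempt_disable();
		mod = __module_text_address(addr);
		/* try_module_get() only succeeds for MODULE_STATE_LIVE, so a
		 * module already in GOING/UNFORMED is rejected, not probed. */
		if (mod && !try_module_get(mod))
			mod = NULL;
		preempt_enable();

		return mod;	/* caller must module_put() when finished */
	}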
Link: https://lore.kernel.org/all/20240410015802.265220-1-zhengyejian1@huawei.com/
Fixes: b983c2821236 ("kprobes: Forbid probing on trampoline and BPF code areas")
Cc: stable@vger.kernel.org
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
[Resolve conflicts due to lack of dependency commit 223a76b268c9 ("kprobes: Fix coding style issues")]
---
 kernel/kprobes.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 325a81f9c38f..f29d7cc3eb01 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1646,10 +1646,17 @@ static int check_kprobe_address_safe(struct kprobe *p, jump_label_lock(); preempt_disable();
- /* Ensure it is not in reserved area nor out of text */ - if (!(core_kernel_text((unsigned long) p->addr) || - is_module_text_address((unsigned long) p->addr)) || - in_gate_area_no_mm((unsigned long) p->addr) || + /* Ensure the address is in a text area, and find a module if exists. */ + *probed_mod = NULL; + if (!core_kernel_text((unsigned long) p->addr)) { + *probed_mod = __module_text_address((unsigned long) p->addr); + if (!(*probed_mod)) { + ret = -EINVAL; + goto out; + } + } + /* Ensure it is not in reserved area. */ + if (in_gate_area_no_mm((unsigned long) p->addr) || within_kprobe_blacklist((unsigned long) p->addr) || jump_label_text_reserved(p->addr, p->addr) || static_call_text_reserved(p->addr, p->addr) || @@ -1659,8 +1666,7 @@ static int check_kprobe_address_safe(struct kprobe *p, goto out; }
- /* Check if are we probing a module */ - *probed_mod = __module_text_address((unsigned long) p->addr); + /* Get module refcount and reject __init functions for loaded modules. */ if (*probed_mod) { /* * We must hold a refcount of the probed module while updating
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Kprobes and livepatch_wo may modify the first several instructions of a
function at the same time, which causes a conflict. Since dynamic ftrace
reserves instructions at the start of each non-notrace function, we can let
kprobes work on the reserved instructions and livepatch_wo work on the
remaining instructions, avoiding the conflict. Note, however, that when a
function is marked 'notrace' and therefore lacks the reserved instructions,
the two must still not modify the same instruction.

Lock ordering chosen to prevent deadlocks:
  kprobe_mutex -> klp_mutex -> cpus_read_lock -> text_mutex
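A sketch of how the two paths honor that order, following the
enabled_store() and register_kprobe() hunks below; the function names here
are hypothetical and error handling is trimmed:

	/* livepatch sysfs path: kprobe_mutex is taken before klp_mutex */
	static int livepatch_enable_path(struct klp_patch *patch)
	{
		kprobes_lock();			/* mutex_lock(&kprobe_mutex) */
		klp_lock();			/* mutex_lock(&klp_mutex) */
		/* enable or disable the patch; cpus_read_lock() and
		 * text_mutex may be taken deeper in the call chain */
		klp_unlock();
		kprobes_unlock();
		return 0;
	}

	/* kprobe registration path: klp_mutex nests inside kprobe_mutex,
	 * respecting the same order */
	static int kprobe_register_path(struct kprobe *p)
	{
		int ret;

		mutex_lock(&kprobe_mutex);
		klp_lock();
		ret = klp_check_patched((unsigned long)p->addr);
		klp_unlock();
		mutex_unlock(&kprobe_mutex);
		return ret;
	}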
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 include/linux/kprobes.h   |  8 +++++
 include/linux/livepatch.h | 13 ++++++++
 kernel/kprobes.c          | 25 +++++++++++++++-
 kernel/livepatch/Kconfig  | 14 +++++++++
 kernel/livepatch/core.c   | 62 ++++++++++++++++++++++++++++++++++++++-
 5 files changed, 120 insertions(+), 2 deletions(-)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 18b7c40ffb37..e2713fe9c100 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -496,6 +496,14 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr) } #endif
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE +void kprobes_lock(void); +void kprobes_unlock(void); +#else /* !CONFIG_LIVEPATCH_ISOLATE_KPROBE */ +static inline void kprobes_lock(void) { } +static inline void kprobes_unlock(void) { } +#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */ + /* Returns true if kprobes handled the fault */ static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs, unsigned int trap) diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index a5ef153bae8d..ad402ea7a3e4 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -364,4 +364,17 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
#endif /* CONFIG_LIVEPATCH */
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE +void klp_lock(void); +void klp_unlock(void); +int klp_check_patched(unsigned long addr); +#else /* !CONFIG_LIVEPATCH_ISOLATE_KPROBE */ +static inline void klp_lock(void) { } +static inline void klp_unlock(void) { } +static inline int klp_check_patched(unsigned long addr) +{ + return 0; +} +#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */ + #endif /* _LINUX_LIVEPATCH_H_ */ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index f29d7cc3eb01..f83b8b6b829f 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -42,7 +42,9 @@ #include <asm/cacheflush.h> #include <asm/errno.h> #include <linux/uaccess.h> - +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE +#include <linux/livepatch.h> +#endif #define KPROBE_HASH_BITS 6 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
@@ -1695,6 +1697,18 @@ static int check_kprobe_address_safe(struct kprobe *p, return ret; }
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE +void kprobes_lock(void) +{ + mutex_lock(&kprobe_mutex); +} + +void kprobes_unlock(void) +{ + mutex_unlock(&kprobe_mutex); +} +#endif + int register_kprobe(struct kprobe *p) { int ret; @@ -1722,6 +1736,12 @@ int register_kprobe(struct kprobe *p) return ret;
mutex_lock(&kprobe_mutex); +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE + klp_lock(); + ret = klp_check_patched((unsigned long)p->addr); + if (ret) + goto out; +#endif
old_p = get_kprobe(p->addr); if (old_p) { @@ -1755,6 +1775,9 @@ int register_kprobe(struct kprobe *p) /* Try to optimize kprobe */ try_to_optimize_kprobe(p); out: +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE + klp_unlock(); +#endif mutex_unlock(&kprobe_mutex);
if (probed_mod) diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig index a59cbb6506cb..55f8ba227e6b 100644 --- a/kernel/livepatch/Kconfig +++ b/kernel/livepatch/Kconfig @@ -109,5 +109,19 @@ config LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE service downtime. Say N if you are unsure.
+config LIVEPATCH_ISOLATE_KPROBE + bool "Isolating livepatch and kprobe" + depends on LIVEPATCH_RESTRICT_KPROBE + depends on DYNAMIC_FTRACE && (X86_64 || ARM64) + default n + help + Kprobe and livepatch_wo may modify the first several instructions of + a function at the same time which causing a conflict. Since dynamic + ftrace reserve instructions at non-notrace functions, we can allow + kprobe works on the reserved instructions and livepatch_wo work on + other instructions so as to avoid the conflict. But note that we also + do not allow both modify the same instruction when a function is + marked as 'notrace' and without the reserved instructions. + endmenu endif diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index fdf00f2a6d84..3f3fa1c3c80c 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -97,6 +97,42 @@ static inline struct kprobe *klp_check_patch_kprobed(struct klp_patch *patch) } #endif /* CONFIG_LIVEPATCH_RESTRICT_KPROBE */
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE +void klp_lock(void) +{ + mutex_lock(&klp_mutex); +} + +void klp_unlock(void) +{ + mutex_unlock(&klp_mutex); +} + +int klp_check_patched(unsigned long addr) +{ + struct klp_patch *patch; + struct klp_object *obj; + struct klp_func *func; + + lockdep_assert_held(&klp_mutex); + list_for_each_entry(patch, &klp_patches, list) { + if (!patch->enabled) + continue; + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + unsigned long old_func = (unsigned long)func->old_func; + + if (addr >= old_func && addr < old_func + func->old_size) { + pr_err("func %pS has been livepatched\n", (void *)addr); + return -EINVAL; + } + } + } + } + return 0; +} +#endif /* CONFIG_LIVEPATCH_ISOLATE_KPROBE */ + static bool klp_is_module(struct klp_object *obj) { return obj->name; @@ -483,6 +519,9 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
patch = container_of(kobj, struct klp_patch, kobj);
+#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE + kprobes_lock(); +#endif mutex_lock(&klp_mutex);
if (!klp_is_patch_registered(patch)) { @@ -507,7 +546,9 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
out: mutex_unlock(&klp_mutex); - +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE + kprobes_unlock(); +#endif if (ret) return ret; return count; @@ -1028,6 +1069,11 @@ static int klp_init_object_loaded(struct klp_patch *patch, module_enable_ro(patch->mod, true);
klp_for_each_func(obj, func) { +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE + unsigned long old_func; + unsigned long ftrace_loc; +#endif + ret = klp_find_object_symbol(obj->name, func->old_name, func->old_sympos, (unsigned long *)&func->old_func); @@ -1041,6 +1087,20 @@ static int klp_init_object_loaded(struct klp_patch *patch, func->old_name); return -ENOENT; } +#ifdef CONFIG_LIVEPATCH_ISOLATE_KPROBE + old_func = (unsigned long)func->old_func; + ftrace_loc = ftrace_location_range(old_func, old_func + func->old_size - 1); + if (ftrace_loc) { + if (WARN_ON(ftrace_loc < old_func || + ftrace_loc >= old_func + func->old_size - MCOUNT_INSN_SIZE)) { + pr_err("ftrace location for '%s' invalid", func->old_name); + return -EINVAL; + } + func->old_func = (void *)(ftrace_loc + MCOUNT_INSN_SIZE); + func->old_size -= ((unsigned long)func->old_func - old_func); + } +#endif + #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY if (func->old_size < KLP_MAX_REPLACE_SIZE) { pr_err("%s size less than limit (%lu < %zu)\n", func->old_name,
mainline inclusion
from mainline-v5.16-rc1
commit 03bac0df2886882c43e6d0bfff9dee84a184fc7e
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
------------------------------------------------------
Introduce kretprobe_find_ret_addr() and is_kretprobe_trampoline(). These
APIs will be used by the ORC stack unwinder and ftrace so that they can
check whether a given address points to the kretprobe trampoline code and,
if so, query the correct return address.
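A minimal sketch of the intended caller pattern; it matches the
unwind_recover_kretprobe() helper that a later patch in this series adds
for x86 (recover_ret_addr() is a hypothetical name):

	/* If the return address read from the stack slot at @addr_p is the
	 * kretprobe trampoline, ask the kretprobe layer for the address it
	 * replaced; otherwise return @addr unchanged. */
	static unsigned long recover_ret_addr(struct task_struct *tsk,
					      unsigned long addr,
					      unsigned long *addr_p)
	{
		if (is_kretprobe_trampoline(addr))
			return kretprobe_find_ret_addr(tsk, (void *)addr_p);

		return addr;
	}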
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Tested-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Conflicts:
	include/linux/kprobes.h
	kernel/kprobes.c
[Reimplement kretprobe_find_ret_addr() and is_kretprobe_trampoline() without
the dependency commit d741bf41d7c7 ("kprobes: Remove kretprobe hash") and
other refactors, since those would bring in too many changes]
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 include/linux/kprobes.h | 16 ++++++++++++++++
 kernel/kprobes.c        | 23 +++++++++++++++++++++
 2 files changed, 39 insertions(+)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index e2713fe9c100..28a6e871dac6 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -200,6 +200,12 @@ extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs); extern int arch_trampoline_kprobe(struct kprobe *p);
+void kretprobe_trampoline(void); +static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr) +{ + return !in_nmi() && (void *)addr == &kretprobe_trampoline; +} + /* If the trampoline handler called from a kprobe, use this version */ unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, void *trampoline_address, @@ -223,6 +229,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs, return ret; }
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp); #else /* CONFIG_KRETPROBES */ static inline void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) @@ -232,6 +239,15 @@ static inline int arch_trampoline_kprobe(struct kprobe *p) { return 0; } +static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr) +{ + return false; +} +static nokprobe_inline +unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp) +{ + return 0; +} #endif /* CONFIG_KRETPROBES */
extern struct kretprobe_blackpoint kretprobe_blacklist[]; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index f83b8b6b829f..0ad524beaba2 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1984,6 +1984,29 @@ unsigned long __weak arch_deref_entry_point(void *entry)
#ifdef CONFIG_KRETPROBES
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp) +{ + struct kretprobe_instance *ri = NULL; + struct hlist_head *head; + unsigned long flags; + kprobe_opcode_t *correct_ret_addr = NULL; + + kretprobe_hash_lock(tsk, &head, &flags); + hlist_for_each_entry(ri, head, hlist) { + if (ri->task != tsk) + continue; + if (ri->fp != fp) + continue; + if (!is_kretprobe_trampoline((unsigned long)ri->ret_addr)) { + correct_ret_addr = ri->ret_addr; + break; + } + } + kretprobe_hash_unlock(tsk, &flags); + return (unsigned long)correct_ret_addr; +} +NOKPROBE_SYMBOL(kretprobe_find_ret_addr); + unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, void *trampoline_address, void *frame_pointer)
From: Masami Hiramatsu <mhiramat@kernel.org>
mainline inclusion
from mainline-v5.16-rc1
commit 19138af1bd880d52318bbb164de72a482e59a45c
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
------------------------------------------------------
Since the kretprobe replaces the function return address with the
kretprobe_trampoline on the stack, x86 unwinders cannot continue unwinding
the stack at that point, or they record kretprobe_trampoline instead of the
correct return address.

To fix this issue, find the correct return address from the task's
kretprobe_instances, just as the function-graph tracer does.

With this fix, the unwinder can correctly unwind the stack from a kretprobe
event on x86, as shown below.
  <...>-135     [003] ...1     6.722338: r_full_proxy_read_0: (vfs_read+0xab/0x1a0 <- full_proxy_read)
  <...>-135     [003] ...1     6.722377: <stack trace>
 => kretprobe_trace_func+0x209/0x2f0
 => kretprobe_dispatcher+0x4a/0x70
 => __kretprobe_trampoline_handler+0xca/0x150
 => trampoline_handler+0x44/0x70
 => kretprobe_trampoline+0x2a/0x50
 => vfs_read+0xab/0x1a0
 => ksys_read+0x5f/0xe0
 => do_syscall_64+0x33/0x40
 => entry_SYSCALL_64_after_hwframe+0x44/0xae
Link: https://lkml.kernel.org/r/163163055130.489837.5161749078833497255.stgit@devn...
Reported-by: Daniel Xu <dxu@dxuuu.xyz>
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Suggested-by: Josh Poimboeuf <jpoimboe@redhat.com>
Tested-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Conflicts:
	arch/x86/include/asm/unwind.h
[Adapt for calling kretprobe_find_ret_addr()]
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/x86/include/asm/unwind.h  | 22 ++++++++++++++++++++
 arch/x86/kernel/unwind_frame.c |  3 +--
 arch/x86/kernel/unwind_guess.c |  3 +--
 arch/x86/kernel/unwind_orc.c   | 21 +++++++++++++++++----
 4 files changed, 41 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 70fc159ebe69..1139b3cab818 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -4,6 +4,7 @@
#include <linux/sched.h> #include <linux/ftrace.h> +#include <linux/kprobes.h> #include <asm/ptrace.h> #include <asm/stacktrace.h>
@@ -99,6 +100,27 @@ void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size) {} #endif
+static inline +unsigned long unwind_recover_kretprobe(struct unwind_state *state, + unsigned long addr, unsigned long *addr_p) +{ + return is_kretprobe_trampoline(addr) ? + kretprobe_find_ret_addr(state->task, addr_p) : + addr; +} + +/* Recover the return address modified by kretprobe and ftrace_graph. */ +static inline +unsigned long unwind_recover_ret_addr(struct unwind_state *state, + unsigned long addr, unsigned long *addr_p) +{ + unsigned long ret; + + ret = ftrace_graph_ret_addr(state->task, &state->graph_idx, + addr, addr_p); + return unwind_recover_kretprobe(state, ret, addr_p); +} + /* * This disables KASAN checking when reading a value from another task's stack, * since the other task could be running on another CPU and could have poisoned diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index d7c44b257f7f..8e1c50c86e5d 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -240,8 +240,7 @@ static bool update_stack_state(struct unwind_state *state, else { addr_p = unwind_get_return_address_ptr(state); addr = READ_ONCE_TASK_STACK(state->task, *addr_p); - state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, - addr, addr_p); + state->ip = unwind_recover_ret_addr(state, addr, addr_p); }
/* Save the original stack pointer for unwind_dump(): */ diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c index c49f10ffd8cd..884d68a6e714 100644 --- a/arch/x86/kernel/unwind_guess.c +++ b/arch/x86/kernel/unwind_guess.c @@ -15,8 +15,7 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
addr = READ_ONCE_NOCHECK(*state->sp);
- return ftrace_graph_ret_addr(state->task, &state->graph_idx, - addr, state->sp); + return unwind_recover_ret_addr(state, addr, state->sp); } EXPORT_SYMBOL_GPL(unwind_get_return_address);
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 9452ba625f67..e1105f169bcc 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -579,9 +579,8 @@ bool unwind_next_frame(struct unwind_state *state) if (!deref_stack_reg(state, ip_p, &state->ip)) goto err;
- state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, - state->ip, (void *)ip_p); - + state->ip = unwind_recover_ret_addr(state, state->ip, + (unsigned long *)ip_p); state->sp = sp; state->regs = NULL; state->prev_regs = NULL; @@ -594,7 +593,18 @@ bool unwind_next_frame(struct unwind_state *state) (void *)orig_ip); goto err; } - + /* + * There is a small chance to interrupt at the entry of + * __kretprobe_trampoline() where the ORC info doesn't exist. + * That point is right after the RET to __kretprobe_trampoline() + * which was modified return address. + * At that point, the @addr_p of the unwind_recover_kretprobe() + * (this has to point the address of the stack entry storing + * the modified return address) must be "SP - (a stack entry)" + * because SP is incremented by the RET. + */ + state->ip = unwind_recover_kretprobe(state, state->ip, + (unsigned long *)(state->sp - sizeof(long))); state->regs = (struct pt_regs *)sp; state->prev_regs = NULL; state->full_regs = true; @@ -607,6 +617,9 @@ bool unwind_next_frame(struct unwind_state *state) (void *)orig_ip); goto err; } + /* See UNWIND_HINT_TYPE_REGS case comment. */ + state->ip = unwind_recover_kretprobe(state, state->ip, + (unsigned long *)(state->sp - sizeof(long)));
if (state->full_regs) state->prev_regs = state->regs;
From: Masami Hiramatsu <mhiramat@kernel.org>
mainline inclusion
from mainline-v5.16-rc1
commit 811b93ffaa488a4733270d8c8bc6c773334ab351
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Compile the kretprobe-related stacktrace entry recovery code and the
unwind_state::kr_cur field only when CONFIG_KRETPROBES=y.
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Conflicts:
	arch/x86/include/asm/unwind.h
Fixes: b0c9f2f39283 ("x86/unwind: Recover kretprobe trampoline entry")
Signed-off-by: Chen Zhongjin <chenzhongjin@huawei.com>
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/x86/include/asm/unwind.h | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 1139b3cab818..fbce1e35d406 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -104,9 +104,13 @@ static inline unsigned long unwind_recover_kretprobe(struct unwind_state *state, unsigned long addr, unsigned long *addr_p) { +#ifdef CONFIG_KRETPROBES return is_kretprobe_trampoline(addr) ? kretprobe_find_ret_addr(state->task, addr_p) : addr; +#else + return addr; +#endif }
/* Recover the return address modified by kretprobe and ftrace_graph. */
From: Masami Hiramatsu <mhiramat@kernel.org>
mainline inclusion
from mainline-v5.16-rc1
commit cd9bc2c9258816dc934b300705076519d7375b81
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
------------------------------------------------------
Since the kretprobe replaces the function return address with the
kretprobe_trampoline on the stack, the stack unwinder shows it instead of
the correct return address.

This checks whether the next return address is __kretprobe_trampoline(),
and if so, tries to find the correct return address in the kretprobe
instance list. For this purpose, a 'kr_cur' loop cursor is added to
remember the current kretprobe instance.

With this fix, arm64 can now enable
CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE and pass the kprobe self-tests.
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Conflicts:
	arch/arm64/Kconfig
	arch/arm64/include/asm/stacktrace.h
	arch/arm64/kernel/stacktrace.c
[Adapt for calling kretprobe_find_ret_addr()]
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm64/kernel/stacktrace.c | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index c445828ecc3a..2073a3a7fe75 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -101,6 +101,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->pc = ret_stack->ret; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#ifdef CONFIG_KRETPROBES + if (is_kretprobe_trampoline(frame->pc)) + frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp); +#endif
frame->pc = ptrauth_strip_insn_pac(frame->pc);
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9R2TB
--------------------------------
Enable CONFIG_LIVEPATCH_ISOLATE_KPROBE by default in openeuler_defconfig.
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/arm64/configs/openeuler_defconfig | 1 +
 arch/x86/configs/openeuler_defconfig   | 1 +
 2 files changed, 2 insertions(+)
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 34061d75a0d2..6dead7235ec6 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -362,6 +362,7 @@ CONFIG_LIVEPATCH_WO_FTRACE=y CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y # CONFIG_LIVEPATCH_STACK is not set CONFIG_LIVEPATCH_RESTRICT_KPROBE=y +CONFIG_LIVEPATCH_ISOLATE_KPROBE=y # end of Enable Livepatch
# diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index 1835f38f2947..3335c83a4ee7 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -505,6 +505,7 @@ CONFIG_LIVEPATCH_WO_FTRACE=y CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y # CONFIG_LIVEPATCH_STACK is not set CONFIG_LIVEPATCH_RESTRICT_KPROBE=y +CONFIG_LIVEPATCH_ISOLATE_KPROBE=y # end of Enable Livepatch # end of Processor type and features