From: Zheng Yejian <zhengyejian1@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60MKD
CVE: NA
--------------------------------
Fix several code style issues:
 - Do not use magic numbers. The magic number here is 10, the length of
   the "migration/" prefix.
 - Do not use parentheses around numbers when printing them.
 - Braces {} are not necessary for single statement blocks.
 - Do not add blank lines at the start of a code block defined by braces.
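To make the magic-number fix concrete, the open-coded
strncmp(t->comm, "migration/", 10) checks are replaced by a helper added
to include/linux/livepatch.h by this patch; in essence (comment trimmed
here for brevity):

	#define KLP_MIGRATION_NAME_PREFIX "migration/"
	static inline bool klp_is_migration_thread(const char *task_name)
	{
		return !strncmp(task_name, KLP_MIGRATION_NAME_PREFIX,
				sizeof(KLP_MIGRATION_NAME_PREFIX) - 1);
	}

so the prefix length is derived from the string itself instead of the
magic number 10.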
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/arm/kernel/livepatch.c        | 11 +----------
 arch/arm64/kernel/livepatch.c      | 15 +++------------
 arch/powerpc/kernel/livepatch_32.c | 15 +++------------
 arch/powerpc/kernel/livepatch_64.c | 13 ++-----------
 arch/x86/kernel/livepatch.c        |  2 +-
 include/linux/livepatch.h          | 16 ++++++++++++++++
 kernel/livepatch/core.c            |  4 +---
 7 files changed, 27 insertions(+), 49 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c
index bc09f338e713..b4d26474ba33 100644
--- a/arch/arm/kernel/livepatch.c
+++ b/arch/arm/kernel/livepatch.c
@@ -277,16 +277,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 			frame.sp = current_stack_pointer;
 			frame.lr = (unsigned long)__builtin_return_address(0);
 			frame.pc = (unsigned long)do_check_calltrace;
-		} else if (strncmp(t->comm, "migration/", 10) == 0) {
-			/*
-			 * current on other CPU
-			 * we call this in stop_machine, so the current
-			 * of each CPUs is mirgation, just compare the
-			 * task_comm here, because we can't get the
-			 * cpu_curr(task_cpu(t))). This assumes that no
-			 * other thread will pretend to be a stopper via
-			 * task_comm.
-			 */
+		} else if (klp_is_migration_thread(t->comm)) {
 			continue;
 		} else {
 			frame.fp = thread_saved_fp(t);
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index 8ec09c22dc26..6b5bcb491125 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -274,16 +274,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 			/* current on this CPU */
 			frame.fp = (unsigned long)__builtin_frame_address(0);
 			frame.pc = (unsigned long)do_check_calltrace;
-		} else if (strncmp(t->comm, "migration/", 10) == 0) {
-			/*
-			 * current on other CPU
-			 * we call this in stop_machine, so the current
-			 * of each CPUs is mirgation, just compare the
-			 * task_comm here, because we can't get the
-			 * cpu_curr(task_cpu(t))). This assumes that no
-			 * other thread will pretend to be a stopper via
-			 * task_comm.
-			 */
+		} else if (klp_is_migration_thread(t->comm)) {
 			continue;
 		} else {
 			frame.fp = thread_saved_fp(t);
@@ -425,7 +416,7 @@ static int do_patch(unsigned long pc, unsigned long new_addr)
 		for (i = 0; i < LJMP_INSN_SIZE; i++) {
 			ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i]);
 			if (ret) {
-				pr_err("patch instruction(%d) large range failed, ret=%d\n",
+				pr_err("patch instruction %d large range failed, ret=%d\n",
 				       i, ret);
 				return -EPERM;
 			}
@@ -471,7 +462,7 @@ void arch_klp_unpatch_func(struct klp_func *func)
 		ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i,
 				func_node->arch_data.old_insns[i]);
 		if (ret) {
-			pr_err("restore instruction(%d) failed, ret=%d\n", i, ret);
+			pr_err("restore instruction %d failed, ret=%d\n", i, ret);
 			return;
 		}
 	}
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c
index 4eefae2f92dc..7b4ed23bf2ca 100644
--- a/arch/powerpc/kernel/livepatch_32.c
+++ b/arch/powerpc/kernel/livepatch_32.c
@@ -293,16 +293,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 			 * backtrace is so similar
 			 */
 			stack = (unsigned long *)current_stack_pointer;
-		} else if (strncmp(t->comm, "migration/", 10) == 0) {
-			/*
-			 * current on other CPU
-			 * we call this in stop_machine, so the current
-			 * of each CPUs is mirgation, just compare the
-			 * task_comm here, because we can't get the
-			 * cpu_curr(task_cpu(t))). This assumes that no
-			 * other thread will pretend to be a stopper via
-			 * task_comm.
-			 */
+		} else if (klp_is_migration_thread(t->comm)) {
 			continue;
 		} else {
 			/*
@@ -440,7 +431,7 @@ static int do_patch(unsigned long pc, unsigned long new_addr)
 			ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
 						ppc_inst(insns[i]));
 			if (ret) {
-				pr_err("patch instruction(%d) large range failed, ret=%d\n",
+				pr_err("patch instruction %d large range failed, ret=%d\n",
 				       i, ret);
 				return -EPERM;
 			}
@@ -478,7 +469,7 @@ void arch_klp_unpatch_func(struct klp_func *func)
 		ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
 					ppc_inst(func_node->arch_data.old_insns[i]));
 		if (ret) {
-			pr_err("restore instruction(%d) failed, ret=%d\n", i, ret);
+			pr_err("restore instruction %d failed, ret=%d\n", i, ret);
 			return;
 		}
 	}
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c
index aca7361ac12b..416f9f03d747 100644
--- a/arch/powerpc/kernel/livepatch_64.c
+++ b/arch/powerpc/kernel/livepatch_64.c
@@ -314,16 +314,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 			 * so similar
 			 */
 			stack = (unsigned long *)current_stack_pointer;
-		} else if (strncmp(t->comm, "migration/", 10) == 0) {
-			/*
-			 * current on other CPU
-			 * we call this in stop_machine, so the current
-			 * of each CPUs is mirgation, just compare the
-			 * task_comm here, because we can't get the
-			 * cpu_curr(task_cpu(t))). This assumes that no
-			 * other thread will pretend to be a stopper via
-			 * task_comm.
-			 */
+		} else if (klp_is_migration_thread(t->comm)) {
 			continue;
 		} else {
 			/*
@@ -476,7 +467,7 @@ void arch_klp_unpatch_func(struct klp_func *func)
 		ret = patch_instruction((struct ppc_inst *)((u32 *)pc + i),
 					ppc_inst(func_node->arch_data.old_insns[i]));
 		if (ret) {
-			pr_err("restore instruction(%d) failed, ret=%d\n", i, ret);
+			pr_err("restore instruction %d failed, ret=%d\n", i, ret);
 			break;
 		}
 	}
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index 5488bf014637..0241e560bd2e 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
@@ -315,7 +315,7 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *da
 #endif
 
 	for_each_process_thread(g, t) {
-		if (!strncmp(t->comm, "migration/", 10))
+		if (klp_is_migration_thread(t->comm))
 			continue;
 
 #ifdef CONFIG_ARCH_STACKWALK
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 9301f8e9bb90..56ad1c1dd83e 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -316,6 +316,22 @@ static inline bool klp_have_reliable_stack(void) { return true; }
 #define klp_smp_isb()
 #endif
 
+#define KLP_MIGRATION_NAME_PREFIX "migration/"
+static inline bool klp_is_migration_thread(const char *task_name)
+{
+	/*
+	 * current on other CPU
+	 * we call this in stop_machine, so the current
+	 * of each CPUs is migration, just compare the
+	 * task_comm here, because we can't get the
+	 * cpu_curr(task_cpu(t))). This assumes that no
+	 * other thread will pretend to be a stopper via
+	 * task_comm.
+	 */
+	return !strncmp(task_name, KLP_MIGRATION_NAME_PREFIX,
+			sizeof(KLP_MIGRATION_NAME_PREFIX) - 1);
+}
+
 #endif /* CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */
 
 #else /* !CONFIG_LIVEPATCH */
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index c8ef647c9cc4..9e65f6ae4061 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1054,10 +1054,9 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 		 * feature 'function descriptor'), otherwise size found by
 		 * 'kallsyms_lookup_size_offset' may be abnormal.
 		 */
-		if (func->old_name[0] != '.') {
+		if (func->old_name[0] != '.')
 			pr_warn("old_name '%s' may miss the prefix '.', old_size=%lu\n",
 				func->old_name, func->old_size);
-		}
 #endif
 
 		if (func->nop)
@@ -1565,7 +1564,6 @@ static int klp_mem_prepare(struct klp_patch *patch)
 
 static void remove_breakpoint(struct klp_func *func, bool restore)
 {
-
 	struct klp_func_node *func_node = klp_find_func_node(func->old_func);
 	struct arch_klp_data *arch_data = &func_node->arch_data;